diff --git a/.golangci.yml b/.golangci.yml
index f24c3f8d165..f6af2fb45ca 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -5,6 +5,12 @@ run:
     - vendor
   skip-dirs-use-default: true
   modules-download-mode: vendor
+  build-tags:
+    - "aro"
+    - "containers_image_openpgp"
+    - "exclude_graphdriver_devicemapper"
+    - "exclude_graphdriver_btrfs"
+  go: "1.18"
 
 issues:
   exclude-rules:
diff --git a/Makefile b/Makefile
index a0e9dd9bd65..feb73627e51 100644
--- a/Makefile
+++ b/Makefile
@@ -3,6 +3,9 @@ TAG ?= $(shell git describe --exact-match 2>/dev/null)
 COMMIT = $(shell git rev-parse --short=7 HEAD)$(shell [[ $$(git status --porcelain) = "" ]] || echo -dirty)
 ARO_IMAGE_BASE = ${RP_IMAGE_ACR}.azurecr.io/aro
 E2E_FLAGS ?= -test.v --ginkgo.v --ginkgo.timeout 180m --ginkgo.flake-attempts=2 --ginkgo.junit-report=e2e-report.xml
+GO_FLAGS ?= -tags=aro,containers_image_openpgp,exclude_graphdriver_btrfs,exclude_graphdriver_devicemapper
+
+export GOFLAGS=$(GO_FLAGS)
 
 # fluentbit version must also be updated in RP code, see pkg/util/version/const.go
 MARINER_VERSION = 20230321
@@ -47,13 +50,13 @@ endif
 endif
 
 build-all:
-	go build -tags aro,containers_image_openpgp ./...
+	go build ./...
 
 aro: check-release generate
-	go build -tags aro,containers_image_openpgp,codec.safe -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro
+	go build -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro
 
 runlocal-rp:
-	go run -tags aro,containers_image_openpgp -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro rp
+	go run -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro rp
 
 az: pyenv
 	. pyenv/bin/activate && \
@@ -74,7 +77,7 @@ client: generate
 # TODO: hard coding dev-config.yaml is clunky; it is also probably convenient to
 # override COMMIT.
 deploy:
-	go run -tags aro,containers_image_openpgp -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro deploy dev-config.yaml ${LOCATION}
+	go run -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro deploy dev-config.yaml ${LOCATION}
 
 dev-config.yaml:
 	go run ./hack/gendevconfig >dev-config.yaml
@@ -146,7 +149,7 @@ proxy:
 	CGO_ENABLED=0 go build -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./hack/proxy
 
 run-portal:
-	go run -tags aro,containers_image_openpgp -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro portal
+	go run -ldflags "-X github.com/Azure/ARO-RP/pkg/util/version.GitCommit=$(VERSION)" ./cmd/aro portal
 
 build-portal:
 	cd portal/v1 && npm install && npm run build && cd ../v2 && npm install && npm run build
@@ -209,10 +212,10 @@ validate-fips:
 	hack/fips/validate-fips.sh
 
 unit-test-go:
-	go run gotest.tools/gotestsum@v1.9.0 --format pkgname --junitfile report.xml -- -tags=aro,containers_image_openpgp -coverprofile=cover.out ./...
+	go run gotest.tools/gotestsum@v1.9.0 --format pkgname --junitfile report.xml -- -coverprofile=cover.out ./...
 
 unit-test-go-coverpkg:
-	go run gotest.tools/gotestsum@v1.9.0 --format pkgname --junitfile report.xml -- -tags=aro,containers_image_openpgp -coverpkg=./... -coverprofile=cover_coverpkg.out ./...
+	go run gotest.tools/gotestsum@v1.9.0 --format pkgname --junitfile report.xml -- -coverpkg=./... -coverprofile=cover_coverpkg.out ./...
 
 lint-go:
 	hack/lint-go.sh
diff --git a/go.mod b/go.mod
index 2d282c71d04..b52c2f4f867 100644
--- a/go.mod
+++ b/go.mod
@@ -15,7 +15,8 @@ require (
 	github.com/alvaroloes/enumer v1.1.2
 	github.com/apparentlymart/go-cidr v1.1.0
 	github.com/codahale/etm v0.0.0-20141003032925-c00c9e6fb4c9
-	github.com/containers/image/v5 v5.21.0
+	github.com/containers/image/v5 v5.21.1
+	github.com/containers/podman/v4 v4.1.1
 	github.com/coreos/go-oidc v2.2.1+incompatible
 	github.com/coreos/go-semver v0.3.0
 	github.com/coreos/go-systemd/v22 v22.3.2
@@ -44,6 +45,7 @@ require (
 	github.com/onsi/ginkgo/v2 v2.3.1
 	github.com/onsi/gomega v1.22.0
 	github.com/open-policy-agent/frameworks/constraint v0.0.0-20221109005544-7de84dff5081
+	github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab
 	github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible
 	github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a
 	github.com/openshift/console-operator v0.0.0-20220407014945-45d37e70e0c2
@@ -101,6 +103,7 @@ require (
 	github.com/IBM/vpc-go-sdk v1.0.1 // indirect
 	github.com/MakeNowJust/heredoc v1.0.0 // indirect
 	github.com/Microsoft/go-winio v0.5.2 // indirect
+	github.com/Microsoft/hcsshim v0.9.2 // indirect
 	github.com/PuerkitoBio/purell v1.1.1 // indirect
 	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
@@ -115,19 +118,28 @@ require (
 	github.com/blang/semver v3.5.1+incompatible // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
+	github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect
 	github.com/cjlapao/common-go v0.0.39 // indirect
 	github.com/clarketm/json v1.17.1 // indirect
+	github.com/containerd/cgroups v1.0.3 // indirect
+	github.com/containerd/containerd v1.6.4 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect
+	github.com/containers/buildah v1.26.1 // indirect
+	github.com/containers/common v0.48.0 // indirect
 	github.com/containers/image v3.0.2+incompatible // indirect
 	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
-	github.com/containers/ocicrypt v1.1.3 // indirect
-	github.com/containers/storage v1.39.0 // indirect
+	github.com/containers/ocicrypt v1.1.4 // indirect
+	github.com/containers/psgo v1.7.2 // indirect
+	github.com/containers/storage v1.40.2 // indirect
 	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
 	github.com/coreos/vcontext v0.0.0-20220326205524-7fcaf69e7050 // indirect
+	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/dimchansky/utfbom v1.1.1 // indirect
+	github.com/disiqueira/gotree/v3 v3.0.2 // indirect
 	github.com/docker/distribution v2.8.2+incompatible // indirect
 	github.com/docker/docker v20.10.24+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.6.4 // indirect
-	github.com/docker/go-connections v0.4.0 // indirect
+	github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
 	github.com/docker/go-units v0.4.0 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
@@ -147,11 +159,13 @@ require (
 	github.com/go-stack/stack v1.8.1 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
 	github.com/gobuffalo/flect v0.2.5 // indirect
+	github.com/godbus/dbus/v5 v5.1.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/btree v1.0.1 // indirect
 	github.com/google/cel-go v0.10.2 // indirect
+	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
 	github.com/google/renameio v1.0.1 // indirect
@@ -160,6 +174,7 @@ require (
 	github.com/googleapis/gax-go/v2 v2.2.0 // indirect
 	github.com/gophercloud/gophercloud v0.24.0 // indirect
 	github.com/gophercloud/utils v0.0.0-20220307143606-8e7800759d16 // indirect
+	github.com/gorilla/schema v1.2.0 // indirect
 	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
 	github.com/h2non/filetype v1.1.3 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -169,20 +184,23 @@ require (
 	github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
 	github.com/imdario/mergo v0.3.12 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/jinzhu/copier v0.3.5 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
-	github.com/klauspost/compress v1.15.1 // indirect
+	github.com/klauspost/compress v1.15.2 // indirect
 	github.com/klauspost/pgzip v1.2.5 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/leodido/go-urn v1.2.1 // indirect
 	github.com/libvirt/libvirt-go v7.4.0+incompatible // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/manifoldco/promptui v0.9.0 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.17 // indirect
 	github.com/mattn/go-runewidth v0.0.13 // indirect
+	github.com/mattn/go-shellwords v1.0.12 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
 	github.com/metal3-io/baremetal-operator v0.0.0-20220405082045-575f5c90718a // indirect
 	github.com/metal3-io/baremetal-operator/apis v0.0.0 // indirect
@@ -196,26 +214,30 @@ require (
 	github.com/microsoft/kiota-serialization-text-go v1.0.0 // indirect
 	github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
+	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.4.3 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
-	github.com/moby/sys/mountinfo v0.6.0 // indirect
+	github.com/moby/sys/mountinfo v0.6.1 // indirect
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
+	github.com/nxadm/tail v1.4.8 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 // indirect
+	github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 // indirect
 	github.com/opencontainers/runc v1.1.5 // indirect
-	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
+	github.com/opencontainers/runtime-tools v0.9.1-0.20220110225228-7e2d60f1e41f // indirect
+	github.com/opencontainers/selinux v1.10.1 // indirect
 	github.com/openshift/cloud-credential-operator v0.0.0-20220316185125-ed0612946f4b // indirect
 	github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668 // indirect
 	github.com/openshift/cluster-api-provider-baremetal v0.0.0-20220218121658-fc0acaaec338 // indirect
 	github.com/openshift/cluster-api-provider-ibmcloud v0.0.1-0.20220201105455-8014e5e894b0 // indirect
 	github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603 // indirect
 	github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20220323121149-e3f2850dd519 // indirect
+	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/ovirt/go-ovirt v0.0.0-20210308100159-ac0bcbc88d7c // indirect
 	github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1 // indirect
 	github.com/pborman/uuid v1.2.1 // indirect
@@ -233,11 +255,16 @@ require (
 	github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
 	github.com/stoewer/go-strcase v1.2.0 // indirect
+	github.com/sylabs/sif/v2 v2.7.0 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
+	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
 	github.com/ulikunitz/xz v0.5.10 // indirect
 	github.com/vbatts/tar-split v0.11.2 // indirect
 	github.com/vbauerster/mpb/v7 v7.4.1 // indirect
 	github.com/vmware/govmomi v0.27.4 // indirect
+	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
+	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	github.com/xlab/treeprint v1.1.0 // indirect
 	github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
 	go.etcd.io/bbolt v1.3.6 // indirect
@@ -262,6 +289,7 @@ require (
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.66.4 // indirect
 	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
+	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/apiserver v0.24.7 // indirect
diff --git a/go.sum b/go.sum
index 490932f4dbc..be12c380a13 100644
--- a/go.sum
+++ b/go.sum
@@ -1,10 +1,14 @@
+4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
 bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
 bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
 bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM=
+bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M=
 bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4=
 cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 cloud.google.com/go v0.58.0/go.mod h1:W+9FnSUw6nhVwXlFcp1eL+krq5+HQUJeUogSeJZZiWg=
+cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
 cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
 cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
@@ -19,18 +23,24 @@ cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6m
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
 cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w=
+cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk=
 cloud.google.com/go/storage v1.9.0/go.mod h1:m+/etGaqZbylxaNT876QGXqEHp4PR2Rq5GMqICWb9bU=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
 dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
 dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
 dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
 github.com/AlecAivazis/survey/v2 v2.3.4 h1:pchTU9rsLUSvWEl2Aq9Pv3k0IE2fkqtGxazskAMd9Ng=
 github.com/AlecAivazis/survey/v2 v2.3.4/go.mod h1:hrV6Y/kQCLhIZXGcriDCUBtB3wnN7156gMXJ3+b23xM=
+github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo=
+github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo=
 github.com/Azure/azure-sdk-for-go v63.1.0+incompatible h1:yNC7qlSUWVF8p0TzxdmWW1FJ3DdIA+0Pge41IU/2+9U=
 github.com/Azure/azure-sdk-for-go v63.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk=
@@ -73,6 +83,7 @@ github.com/BurntSushi/xgbutil v0.0.0-20160919175755-f7c97cef3b4e h1:4ZrkT/RzpnRO
 github.com/BurntSushi/xgbutil v0.0.0-20160919175755-f7c97cef3b4e/go.mod h1:uw9h2sd4WWHOPdJ13MQpwK5qYWKYDumDqxWWIknEQ+k=
 github.com/ClickHouse/clickhouse-go v1.4.9/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
 github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
 github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
 github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0=
 github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3/go.mod h1:8XasY4ymP2V/tn2OOV9ZadmiTE1FIB/h3W+yNlPttKw=
@@ -98,8 +109,13 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go
 github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
 github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
 github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
+github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
 github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
 github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk=
 github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
 github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
@@ -122,6 +138,7 @@ github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+V
 github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
 github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
+github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
 github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
@@ -129,9 +146,8 @@ github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63n
 github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
 github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
-github.com/ProtonMail/go-crypto v0.0.0-20210920160938-87db9fbc61c7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
-github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
+github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
+github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
 github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -140,13 +156,13 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
 github.com/a8m/tree v0.0.0-20210115125333-10a5fd5b637d/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
-github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
 github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E=
 github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
 github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 h1:uwcvnXW76Y0rHM+qs7y8iHknWUWXYFNlD6FEVhc47TU=
@@ -161,16 +177,21 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
 github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
+github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
 github.com/aliyun/alibaba-cloud-sdk-go v1.61.1550 h1:4OAB2kIEtbJ01IqPcUgJCHun0kJYMY0e+zK8TmYjCl4=
 github.com/aliyun/alibaba-cloud-sdk-go v1.61.1550/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU=
 github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible h1:9gWa46nstkJ9miBReJcN8Gq34cBFbzSpQZVVT9N09TM=
 github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
 github.com/alvaroloes/enumer v1.1.2 h1:5khqHB33TZy1GWCO/lZwcroBFh7u+0j40T83VUbfAMY=
 github.com/alvaroloes/enumer v1.1.2/go.mod h1:FxrjvuXoDAx9isTJrv4c+T410zFi0DtXIT0m65DJ+Wo=
+github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
+github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e h1:GCzyKMDDjSGnlpl3clrdAK7I1AaVoaiKDOYkUzChZzg=
 github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.15.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
 github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU=
@@ -186,13 +207,18 @@ github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:o
 github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ=
 github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/ashanbrown/forbidigo v1.2.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI=
+github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU=
 github.com/auth0/go-jwt-middleware v1.0.1/go.mod h1:YSeUX3z6+TF2H+7padiEqNJ73Zy9vXW72U//IgN0BIM=
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
 github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.30.28/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
 github.com/aws/aws-sdk-go v1.35.20/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
 github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
+github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.43.34 h1:8+P+773CDgQqN1eLH1QHT6XgXHUbME3sAbDGszzjajY=
 github.com/aws/aws-sdk-go v1.43.34/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
@@ -212,20 +238,26 @@ github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCS
 github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/blizzy78/varnamelen v0.3.0/go.mod h1:hbwRdBvoBqxk34XyQ6HA0UH3G0/1TKuv5AC4eaBT0Ec=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
 github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
+github.com/breml/bidichk v0.1.1/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE=
 github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/bugsnag-go v1.5.3/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
 github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
@@ -240,10 +272,17 @@ github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cb
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
+github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
+github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU=
+github.com/checkpoint-restore/checkpointctl v0.0.0-20220321135231-33f4a66335f0/go.mod h1:67kWC1PXQLR3lM/mmNnu3Kzn7K4TSWZAGUuQP1JSngk=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.2.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
 github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
+github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
@@ -271,6 +310,7 @@ github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoC
 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/codahale/etm v0.0.0-20141003032925-c00c9e6fb4c9 h1:88tJLy+/ao5kPBv1EtNyduXeWrTHV47seJPgI7pWgDs=
 github.com/codahale/etm v0.0.0-20141003032925-c00c9e6fb4c9/go.mod h1:jy75q4Q7stkoOx8bCRnIm0t1Vh6Pt4OJvcwA9+oJsqI=
+github.com/container-orchestrated-devices/container-device-interface v0.4.0/go.mod h1:E1zcucIkq9P3eyNmY+68dBQsTcsXJh9cgRo2IVNScKQ=
 github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s=
 github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
 github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
@@ -278,6 +318,7 @@ github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqh
 github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
 github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
 github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4=
 github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
@@ -286,6 +327,9 @@ github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkX
 github.com/containerd/containerd v1.4.11/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
 github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
+github.com/containerd/containerd v1.6.3/go.mod h1:gCVGrYRYFm2E8GmuUIbj/NGD7DLZQLzSJQazjVKDOig=
+github.com/containerd/containerd v1.6.4 h1:SEDZBp10mhCp+hkO3Njz/YhGrI7ah3edNcUlRdUPOgg=
+github.com/containerd/containerd v1.6.4/go.mod h1:oWOqbuJUZmOVafhA0lj2NAXbiO1u7F0K5l1bUgdyo94=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
 github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
@@ -297,16 +341,20 @@ github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O1
 github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
 github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
 github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.4/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.5/go.mod h1:Rf2ZrMycr1El589IyuRzn7RkfdRZVKaFGaxSDHVAjj0=
 github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
 github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
 github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
 github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
 github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
+github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo=
 github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
+github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
 github.com/containerd/stargz-snapshotter/estargz v0.10.1/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
-github.com/containerd/stargz-snapshotter/estargz v0.11.1/go.mod h1:6VoPcf4M1wvnogWxqc4TqBWWErCS+R+ucnPZId2VbpQ=
-github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
+github.com/containerd/stargz-snapshotter/estargz v0.11.4 h1:LjrYUZpyOhiSaU7hHrdR82/RBoxfGWSaC0VeSSMXqnk=
+github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
 github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
@@ -319,20 +367,34 @@ github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ
 github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/cni v1.0.0/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
 github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
+github.com/containernetworking/cni v1.1.0/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
 github.com/containernetworking/plugins v1.0.0/go.mod h1:liDVn61uqF5YCAh8W4VNt2cXb8h20RjRQqsRfiZIRaI=
+github.com/containers/buildah v1.26.1 h1:D65Vuo+orsI14WWtJhSX6KrpgBBa7+hveVWevzG8p8E=
+github.com/containers/buildah v1.26.1/go.mod h1:CsWSG8OpJd8v3mlLREJzVAOBgC93DjRNALUVHoi8QsY=
+github.com/containers/common v0.48.0 h1:997nnXBZ+eNpfSM7L4SxhhZubQrfEyw3jRyNMTSsNlw=
+github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0=
+github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/image v3.0.2+incompatible h1:B1lqAE8MUPCrsBLE86J0gnXleeRq8zJnQryhiiGQNyE=
 github.com/containers/image v3.0.2+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M=
-github.com/containers/image/v5 v5.21.0 h1:pDS3kjJBlaGDItKzjvJDqKXwyQs01gv54b6QuMuaH4g=
-github.com/containers/image/v5 v5.21.0/go.mod h1:2nEPM0WuinC/0ssPsMv5Iy8YaRueUUTmTp3C7bn5uro=
+github.com/containers/image/v5 v5.21.1 h1:Cr3zw2f0FZs4SCkdGlc8SN/mpcmg2AKG4OUuDbeGS/Q=
+github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
 github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
 github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/containers/ocicrypt v1.1.3 h1:uMxn2wTb4nDR7GqG3rnZSfpJXqWURfzZ7nKydzIeKpA=
 github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
-github.com/containers/storage v1.38.3-0.20220301151551-d06b0f81c0aa/go.mod h1:LkkL34WRi4dI4jt9Cp+ImdZi/P5i36glSHimT5CP5zM=
-github.com/containers/storage v1.39.0 h1:NV93CVx6KAQ04cldeJyqa7uDZivhmO3rXla1cyn75dk=
-github.com/containers/storage v1.39.0/go.mod h1:UAD0cKLouN4BOQRgZut/nMjrh/EnTCjSNPgp4ZuGWMs=
+github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
+github.com/containers/ocicrypt v1.1.4 h1:V0ktirShnF1iJ2ithuoYE4eNAOSL3af1PlTiykv3PLQ=
+github.com/containers/ocicrypt v1.1.4/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
+github.com/containers/podman/v4 v4.1.1 h1:ulT4QEn49K7ApsP9vtl0PZp/gfipuYEdqcUbA6r7mpw=
+github.com/containers/podman/v4 v4.1.1/go.mod h1:ZgZCaL1EAnRXPbCUVQ3P24UZ+uGAGUTXLysvEBwpmkY=
+github.com/containers/psgo v1.7.2 h1:WbCvsY9w+nCv3j4der0mbD3PSRUv/W8l+G0YrZrdSDc=
+github.com/containers/psgo v1.7.2/go.mod h1:SLpqxsPOHtTqRygjutCPXmeU2PoEFzV3gzJplN4BMx0=
+github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
+github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc=
+github.com/containers/storage v1.40.0/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
+github.com/containers/storage v1.40.2 h1:GUlHaGnrs1JOEwv6YEvkQdgYXOXZdU1Angy4wgWNgF8=
+github.com/containers/storage v1.40.2/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
 github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
 github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE=
 github.com/coreos/container-linux-config-transpiler v0.9.0/go.mod h1:SlcxXZQ2c42knj8pezMiQsM1f+ADxFMjGetuMKR/YSQ=
@@ -377,6 +439,7 @@ github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ
 github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI=
 github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
 github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
 github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg=
@@ -392,24 +455,31 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S
 github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
 github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E=
+github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc=
 github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
 github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU=
 github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ=
 github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
 github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8=
 github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA=
+github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE=
 github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
+github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA=
 github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
 github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc=
+github.com/digitalocean/go-libvirt v0.0.0-20201209184759-e2a69bcd5bd1/go.mod h1:QS1XzqZLcDniNYrN7EZefq3wIyb/M2WmJbql4ZKoc1Q=
+github.com/digitalocean/go-qemu v0.0.0-20210326154740-ac9e0b687001/go.mod h1:IetBE52JfFxK46p2n2Rqm+p5Gx1gpu2hRHsrbnPOWZQ=
 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
+github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
 github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
 github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli v20.10.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
@@ -424,6 +494,7 @@ github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r
 github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v17.12.1-ce+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
@@ -432,19 +503,23 @@ github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
 github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
 github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
-github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y=
+github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q=
 github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
 github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
 github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA=
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
 github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dtylman/scp v0.0.0-20181017070807-f3000a34aef4/go.mod h1:jN1ZaUPSNA8jm10nmaRLky84qV/iCeiHmcEf3EbP+dc=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
@@ -459,7 +534,10 @@ github.com/emicklei/go-restful v2.9.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/esimonov/ifshort v1.0.3/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE=
+github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
 github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw=
 github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
 github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -472,14 +550,17 @@ github.com/evanphx/json-patch/v5 v5.2.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2Vvl
 github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
 github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
 github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
+github.com/fanliao/go-promise v0.0.0-20141029170127-1890db352a72/go.mod h1:PjfxuH4FZdUyfMdtBio2lsRr1AKEaVPwelzuHuh8Lqc=
 github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
 github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
 github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
 github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
 github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
+github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
 github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
@@ -496,7 +577,11 @@ github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5
 github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
 github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
+github.com/fsouza/go-dockerclient v1.7.7/go.mod h1:njNCXvoZj3sLPjf3yO0DPHf1mdLdCPDYPc14GskKA4Y=
+github.com/fsouza/go-dockerclient v1.7.11/go.mod h1:zvYxutUNOK853i1s7VywZxQgxSHbm7A6en/q9MHBN6k=
+github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
 github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
+github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E=
 github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
 github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
 github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
@@ -512,18 +597,15 @@ github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy
 github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I=
 github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0=
 github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/go-critic/go-critic v0.6.1/go.mod h1:SdNCfU0yF3UBjtaZGw6586/WocupMOJuiqgom5DsQxM=
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
 github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
 github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
 github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
 github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
 github.com/go-git/go-billy/v5 v5.1.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
 github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
-github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
 github.com/go-git/go-git/v5 v5.3.0/go.mod h1:xdX4bWJ48aOrdhnl2XqHYstHbbp6+LFS4r4X+lNVprw=
-github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -550,6 +632,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
 github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
+github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
 github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
 github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8=
@@ -593,6 +677,7 @@ github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/j
 github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
 github.com/go-playground/validator/v10 v10.10.1 h1:uA0+amWMiglNZKZ9FJRKUAe9U3RX91eVn1JYXMWt7ig=
 github.com/go-playground/validator/v10 v10.10.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
+github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
@@ -604,6 +689,18 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg78
 github.com/go-test/deep v1.0.5/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8=
 github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
 github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
+github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
+github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
+github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw=
+github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
+github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
+github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
+github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
+github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
+github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
 github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ=
 github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
 github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w=
@@ -621,8 +718,11 @@ github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godror/godror v0.13.3/go.mod h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg=
 github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
 github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0=
 github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
@@ -653,6 +753,7 @@ github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71
 github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
 github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
 github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
@@ -668,6 +769,16 @@ github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8l
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
+github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
+github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
+github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
+github.com/golangci/golangci-lint v1.43.0/go.mod h1:VIFlUqidx5ggxDfQagdvd9E67UjMXtTHBkBQ7sHoC5Q=
+github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
+github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
+github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
+github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY=
+github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
 github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -679,6 +790,7 @@ github.com/google/cel-go v0.10.2 h1:fJtfqBC/zg/+M0W32IemohwB6u5oFWv1iVGNpgUxan0=
 github.com/google/cel-go v0.10.2/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
 github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
 github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
+github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs=
 github.com/google/gnostic v0.5.5 h1:xaJtlbPCF2oT4Aidl/Al5W6lRq7g5+biHTihznoaa7k=
 github.com/google/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -694,12 +806,14 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
 github.com/google/go-containerregistry v0.8.0/go.mod h1:wW5v71NHGnQyb4k+gSshjxidrC7lN33MdWEn+Mz9TsI=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-github/v27 v27.0.4/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0=
+github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
 github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -724,6 +838,8 @@ github.com/google/renameio v1.0.1 h1:Lh/jXZmvZxb0BBeSY5VKEfidcbcbenKjZFzM/q0fSeU
 github.com/google/renameio v1.0.1/go.mod h1:t/HQoYBZSsWSNK35C6CO/TpPLDVWvxOHboWUAweKUpk=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw=
+github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v0.0.0-20170306145142-6a5e28554805/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -733,10 +849,12 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
 github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
 github.com/googleapis/gax-go/v2 v2.2.0 h1:s7jOdKSaksJVOxE0Y/S32otcfiP+UQ0cL8/GTKaONwE=
 github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
 github.com/gophercloud/gophercloud v0.15.1-0.20210202035223-633d73521055/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
 github.com/gophercloud/gophercloud v0.18.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
 github.com/gophercloud/gophercloud v0.19.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
@@ -748,11 +866,15 @@ github.com/gophercloud/utils v0.0.0-20220307143606-8e7800759d16 h1:slt/exMiitZNY
 github.com/gophercloud/utils v0.0.0-20220307143606-8e7800759d16/go.mod h1:qOGlfG6OIJ193/c3Xt/XjOfHataNZdQcVgiu93LxBUM=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
+github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw=
+github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/csrf v1.7.1 h1:Ir3o2c1/Uzj6FBxMlAUB6SivgVMy1ONXwYgXn+/aHPE=
 github.com/gorilla/csrf v1.7.1/go.mod h1:+a/4tCmqhG6/w4oafeAZ9pEa3/NZOWYVbD9fV0FwIQA=
 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
 github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -760,23 +882,40 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
 github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/schema v1.2.0 h1:YufUaxZYCKGFuAq3c96BOhjgd5nmXiOY9NGzF247Tsc= +github.com/gorilla/schema v1.2.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= +github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= 
+github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-health-probe v0.3.2/go.mod h1:izVOQ4RWbjUR6lm4nn+VLJyQ+FyaiGmprEYgI04Gs7U= github.com/h2non/filetype v1.1.1 h1:xvOwnXKAckvtLWsN398qS9QhlxlnVXBjXBydK2/UFB4= @@ -811,9 +950,11 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -824,22 +965,29 @@ github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7U github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714/go.mod h1:2Goc3h8EklBH5mspfHFxBnEoURQCGzQQH1ga9Myjvis= github.com/iancoleman/strcase v0.1.2/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= 
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/insomniacslk/dhcp v0.0.0-20220119180841-3c283ff8b7dd/go.mod h1:h+MxyHxRg9NH3terB1nfRIUaQEcI0XOVkdR9LNBlp8E= github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= +github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= @@ -851,6 +999,12 @@ github.com/jewzaam/go-cosmosdb v0.0.0-20220315232836-282b67c5b234 h1:R0Hokq55Hv3 github.com/jewzaam/go-cosmosdb v0.0.0-20220315232836-282b67c5b234/go.mod h1:kZxm8EB19+pd3nT92t0aQblXb7K4sHG8cp59cleqdNc= github.com/jewzaam/installer-aro v0.9.0-master.0.20220524230743-7e2aa7a0cc1a h1:dz4VJywAe1LvRMqwl8mkaVLb0vfvp+4NBT+EaxXj6oE= github.com/jewzaam/installer-aro v0.9.0-master.0.20220524230743-7e2aa7a0cc1a/go.mod h1:ak+tefHa9dwYifd0+K910jIyFuRuqHjtr/htkxI8IZo= +github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= +github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -866,12 +1020,18 @@ github.com/joelanford/ignore v0.0.0-20210607151042-0d25dc18b62d/go.mod h1:7HQupe github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jongio/azidext/go/azidext v0.4.0 h1:TOYyVFMeWGgXNhURSgrEtUCu7JAAKgsy+5C4+AEfYlw= github.com/jongio/azidext/go/azidext v0.4.0/go.mod h1:VrlpGde5B+pPbTUxnThE5UIQQkcebdr3jrC2MmlMVSI= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod 
h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= +github.com/jsimonetti/rtnetlink v0.0.0-20201110080708-d2c240429e6c/go.mod h1:huN4d1phzjhlOsNIjFsw2SVRbwIHj3fJDMEU2SDPTmg= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -880,30 +1040,35 @@ github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kevinburke/ssh_config v1.1.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= github.com/kisom/goutils v1.1.0/go.mod h1:+UBTfd78habUYWFbNWTJNG+jNG/i/lGURakr4A/yNRw= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.4/go.mod 
h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.14.3/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw= +github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -922,18 +1087,27 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= +github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4= github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28/go.mod h1:T/T7jsxVqf9k/zYOqbgNAsANsjxTd1Yq3htjDhQ1H0c= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= +github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/libvirt/libvirt-go 
v4.10.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE= github.com/libvirt/libvirt-go v7.4.0+incompatible h1:crnSLkwPqCdXtg6jib/FxBG/hweAc/3Wxth1AehCXL4= @@ -945,8 +1119,9 @@ github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-b github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= -github.com/magefile/mage v1.13.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -957,15 +1132,19 @@ github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= -github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= +github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= @@ -982,21 +1161,35 @@ github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-oci8 v0.0.7/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= 
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= +github.com/mdlayher/ethernet v0.0.0-20190606142754-0394541c37b7/go.mod h1:U6ZQobyTjI/tJyq2HG+i/dfSoFUt8/aZCM+GKtmFk/Y= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= +github.com/mdlayher/raw v0.0.0-20190606142536-fef19f00fc18/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= +github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= +github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v1.1.2/go.mod h1:bnXsMr+ZTH09V5rssEI+jHAZ4z+ZdyhgO/zsy3EhK+0= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= @@ -1018,12 +1211,15 @@ github.com/microsoftgraph/msgraph-sdk-go v1.4.0/go.mod h1:JIDL1xENx92B60NjO2ACyq github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY= 
github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0= github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37/go.mod h1:dYWq+UWoFCDY1TndvFUQuhBbIYmZpjreC8adEAx93zE= github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -1031,6 +1227,7 @@ github.com/mitchellh/copystructure v1.1.1/go.mod h1:EBArHfARyrSWO/+Wyr9zwEkc6XMF github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= @@ -1046,14 +1243,17 @@ github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGg github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo= -github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= 
+github.com/moby/sys/mountinfo v0.6.1 h1:+H/KnGEAGRpTrEAqNVQ2AM3SiwMgJUt/TXj+Z8cmCIc= +github.com/moby/sys/mountinfo v0.6.1/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= @@ -1061,6 +1261,7 @@ github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXy github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/vpnkit v0.5.0/go.mod h1:KyjUrL9cb6ZSNNAUwZfqRjhwwgJ3BJN+kXh0t43WTUQ= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1069,11 +1270,16 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= +github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -1082,7 +1288,10 @@ github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq4 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack 
v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= +github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= @@ -1091,9 +1300,13 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nishanths/exhaustive v0.2.3/go.mod h1:bhIX678Nx8inLM9PbpvK1yv6oGtoP8BfaIeMzgBNKvc= +github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= +github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= @@ -1106,6 +1319,7 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1125,6 +1339,7 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= github.com/onsi/ginkgo/v2 v2.3.1 
h1:8SbseP7qM32WcvE6VaN6vfXxv698izmsJ1UQX9ve7T8= github.com/onsi/ginkgo/v2 v2.3.1/go.mod h1:Sv4yQXwG5VmF7tm3Q5Z+RWUpPo24LF1mpnz2crUb8Ys= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -1140,6 +1355,7 @@ github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7 github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.22.0 h1:AIg2/OntwkBiCg5Tt1ayyiF1ArFrWFoCSMtMi/wdApk= @@ -1156,21 +1372,34 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 h1:g47eG1u/gw0JB7mZ88TcHKCmsy7sWUNZD8ZS9Jhi0O8= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84/go.mod h1:Qnt1q4cjDNQI9bT832ziho5Iw2BhK8o1KwLOwW56VP4= +github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 h1:+czc/J8SlhPKLOtVLMQc+xDCFBT73ZStMsRhSsUhsSg= +github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= +github.com/opencontainers/runtime-spec v1.0.3-0.20201121164853-7413a7f753e1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod 
h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab h1:YQZXa3elcHgKXAa2GjVFC9M3JeP7ZPyFD1YByDx/dgQ= +github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20190417131837-cd1349b7c47e/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/runtime-tools v0.9.1-0.20220110225228-7e2d60f1e41f h1:MMcsVl0FAVEahmXTy+uXoDTw3yJq7nGrK8ITs/kkreo= +github.com/opencontainers/runtime-tools v0.9.1-0.20220110225228-7e2d60f1e41f/go.mod h1:/tgP02fPXGHkU3/qKK1Y0Db4yqNyGm03vLq/mzHzcS4= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= +github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/openshift/api v0.0.0-20220124143425-d74727069f6f h1:iOTv1WudhVm2UsoST+L+ZrA5A9w57h9vmQsdlBuqG6g= github.com/openshift/api v0.0.0-20220124143425-d74727069f6f/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4= @@ -1209,6 +1438,7 @@ github.com/openshift/console-operator v0.0.0-20220318130441-e44516b9c315 h1:zmwv github.com/openshift/console-operator v0.0.0-20220318130441-e44516b9c315/go.mod h1:jCX07P5qFcuJrzd0xO5caxLjvSscehiaq6We/hGlcW8= github.com/openshift/hive/apis v0.0.0-20220719141355-c63c9b0281d8 h1:7e4sMDIstjEKW6SmPv8VhusDaYinDBrspd1M7ybIHC8= github.com/openshift/hive/apis v0.0.0-20220719141355-c63c9b0281d8/go.mod h1:XWo9dsulk75E9xkfxS/GNpJrL5UHgn3wuSyPeO39NME= +github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ= github.com/openshift/library-go v0.0.0-20220303081124-fb4e7a2872f0 h1:hiwAdZ5ishMe4qtUejv+CuBWra18cjZMHVFlVPOZnw0= github.com/openshift/library-go v0.0.0-20220303081124-fb4e7a2872f0/go.mod h1:6AmNM4N4nHftckybV/U7bQW+5AvK5TW81ndSI6KEidw= github.com/openshift/machine-api-operator v0.2.1-0.20220124104622-668c5b52b104/go.mod h1:1j0Au43h8Sn2B81FxOudqcmKnzvMNEH+vfg5y1g2xAk= @@ -1234,6 +1464,7 @@ github.com/operator-framework/operator-manifest-tools v0.2.0/go.mod h1:1YjcCfLUw github.com/operator-framework/operator-registry v1.21.0/go.mod h1:qDxBCYPeOMlOXd95Zi1q4GpiKwK9i9Mag1AkrMOoFNU= github.com/operator-framework/operator-sdk v1.21.0/go.mod h1:dzQr/JCCqzrMCAwvwdlOd0betJPHy0kvP2TjOyUfvrY= github.com/oras-project/oras v0.12.0/go.mod h1:Bj/Uj06ncxBdgseoABRh1zyKm2/FZ9HtF7Y+EQAwrG4= +github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= @@ -1258,6 +1489,7 @@ github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko github.com/performancecopilot/speed 
v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -1275,8 +1507,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= @@ -1297,6 +1531,7 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= @@ -1338,6 +1573,16 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= +github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= +github.com/quasilyte/go-ruleguard v0.3.13/go.mod h1:Ul8wwdqR6kBVOCt2dipDBkE+T6vAV/iixkrKuRTN1oQ= +github.com/quasilyte/go-ruleguard/dsl 
v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.10/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20210428214800-545e0d2e0bf7/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= @@ -1353,32 +1598,42 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rootless-containers/rootlesskit v1.0.1/go.mod h1:t2UAiYagxrJ+wmpFAUIZPcqsm4k2B7ve6g7lILKbloc= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= +github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sebdah/goldie/v2 v2.5.3 
h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/securego/gosec/v2 v2.9.1/go.mod h1:oDcDLcatOJxkCGaCaq8lua1jTnYf6Sou4wdiJ1n4iHc= github.com/serge1peshcoff/selenium-go-conditions v0.0.0-20170824121757-5afbdb74596b h1:jLwzNAxsHzKw5sHju7bUk0iQSynZxWAOtnXD5d37Vto= github.com/serge1peshcoff/selenium-go-conditions v0.0.0-20170824121757-5afbdb74596b/go.mod h1:noHZFMVoy0oY+ICCojiGUgv+/ecK+1M6huoUVWAIJoU= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= @@ -1413,14 +1668,18 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= +github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= +github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= @@ -1439,6 +1698,7 @@ github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlk github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= @@ -1451,6 +1711,8 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1461,23 +1723,38 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/sylabs/release-tools v0.1.0/go.mod h1:pqP/z/11/rYMQ0OM/Nn7TxGijw7KfZwW9UolD/J1TUo= -github.com/sylabs/sif/v2 v2.4.2/go.mod h1:6gQvzNKRIqr4FS08XBfHpkpnxv9b7h58GLkSJ1zdK9A= +github.com/sylabs/sif/v2 v2.7.0 h1:VFzN8alnJ/3n1JA0K9DyUtfSzezWgWrzLDcYGhgBskk= +github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ= +github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tebeka/selenium v0.9.9 h1:cNziB+etNgyH/7KlNI7RMC1ua5aH1+5wUlFQyzeMh+w= github.com/tebeka/selenium v0.9.9/go.mod h1:5Fr8+pUvU6B1OiPfkdCKdXZyr5znvVkxuPd0NOdZCQc= +github.com/tenntenn/modver v1.0.1/go.mod 
h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/thoas/go-funk v0.8.0/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= +github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= +github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= +github.com/u-root/uio v0.0.0-20210528114334-82958018845c/go.mod h1:LpEX5FO/cB+WF4TYGY1V5qktpaZLkKkSegbr0V4eYXA= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/ugorji/go v1.2.0/go.mod h1:1ny++pKMXhLWrwWV5Nf+CbOuZJhMoaFD+0GMFfd8fEc= github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= github.com/ugorji/go/codec v1.2.0/go.mod h1:dXvG35r7zTX6QImXOSFhGMmKtX+wJ7VTWzGvYQGIjBs= @@ -1485,25 +1762,34 @@ github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0 github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.5.1/go.mod h1:oDzoM7pVwz6wHn5ogWgFUU1s4VJayeQS+aEZDqXIEJs= github.com/urfave/negroni 
v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= -github.com/vbauerster/mpb/v7 v7.3.2/go.mod h1:wfxIZcOJq/bG1/lAtfzMXcOiSvbqVi/5GX5WCSi+IsA= github.com/vbauerster/mpb/v7 v7.4.1 h1:NhLMWQ3gNg2KJR8oeA9lO8Xvq+eNPmixDmB6JEQOUdA= github.com/vbauerster/mpb/v7 v7.4.1/go.mod h1:Ygg2mV9Vj9sQBWqsK2m2pidcf9H3s6bNKtqd3/M4gBo= +github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI= github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.1.1-0.20220115184804-dd687eb2f2d4/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= @@ -1518,7 +1804,6 @@ github.com/weppos/publicsuffix-go v0.13.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8L github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/xanzy/ssh-agent v0.3.1/go.mod h1:QIE4lCeL7nkC25x+yA3LBIYfwCc1TFziCtG7cBAac6w= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= @@ -1535,9 +1820,14 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= 
+github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1558,8 +1848,10 @@ github.com/zmap/zcrypto v0.0.0-20200513165325-16679db567ff/go.mod h1:TxpejqcVKQj github.com/zmap/zcrypto v0.0.0-20200911161511-43ff0ea04f21/go.mod h1:TxpejqcVKQjQaVVmMGfzx5HnmFMdIU+vLtaCyPBfGI4= github.com/zmap/zlint/v2 v2.2.1/go.mod h1:ixPWsdq8qLxYRpNUTbcKig3R7WgmspsHGLhCCs6rFAM= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= @@ -1570,6 +1862,7 @@ go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD0 go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= go.mongodb.org/mongo-driver v1.9.4 h1:qXWlnK2WCOWSxJ/Hm3XyYOGKv3ujA2btBsCyuIFvQjc= go.mongodb.org/mongo-driver v1.9.4/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= @@ -1616,6 +1909,7 @@ go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= @@ -1631,6 +1925,7 @@ go4.org 
v0.0.0-20200104003542-c7e774b10ea0 h1:M6XsnQeLwG+rHQ+/rrGh3puBI3WZEy9TBW golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1655,13 +1950,12 @@ golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1676,6 +1970,7 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20210220032938-85be41e4509f/go.mod h1:I6l2HNBLBZEcrOoCpyKLdY2lHoRZ8lI4x60KMCQDft4= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= @@ -1683,7 +1978,9 @@ golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+o golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -1701,7 +1998,10 @@ golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hM golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= @@ -1720,7 +2020,9 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190419010253-1f3472d942ba/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1729,12 +2031,16 @@ golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191021144547-ec77196f6094/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -1754,14 +2060,15 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210929193557-e81a3d93ecf6/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1779,6 +2086,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1803,12 +2111,16 @@ golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190418153312-f0ce4c0180be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606122018-79a91cf218c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1817,6 +2129,7 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1843,6 +2156,7 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1855,6 +2169,8 @@ golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201116194326-cc9327a14d48/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1865,13 +2181,15 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1879,22 +2197,27 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210921065528-437939a70204/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211001092434-39dca1131b70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1935,23 +2258,34 @@ golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod 
h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524210228-3d17549cdc6b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191004183538-27eeabb02079/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1960,8 +2294,10 @@ golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1969,7 +2305,12 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1979,15 +2320,40 @@ golang.org/x/tools v0.0.0-20200610160956-3e83d1e96d0e/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools 
v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.1.7-0.20210921203514-b98090b833e3/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= @@ -2012,6 +2378,7 @@ gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZ google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.26.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= @@ -2024,18 +2391,24 @@ 
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -2054,6 +2427,8 @@ google.golang.org/genproto v0.0.0-20200603110839-e855014d5736/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200610104632-a5b850bcf112/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2090,6 +2465,7 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= @@ -2128,6 +2504,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -2148,10 +2525,12 @@ helm.sh/helm/v3 v3.6.2/go.mod h1:mIIus8EOqj+obtycw3sidsR4ORr2aFDmXMSI3k+oeVY= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY= @@ -2202,6 +2581,10 @@ modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20211002133954-f839ab2b2b11/go.mod h1:3RUAWoUC2YFIr0yZ91R4rLakSx2OhdZXUBSV4g4PucY= oras.land/oras-go v0.4.0/go.mod 
h1:VJcU+VE4rkclUbum5C0O7deEZbBYnsnpbGSACwTjOcg= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/pkg/cluster/install.go b/pkg/cluster/install.go index 9db879f0717..66eb1bf3b60 100644 --- a/pkg/cluster/install.go +++ b/pkg/cluster/install.go @@ -19,8 +19,8 @@ import ( "k8s.io/client-go/kubernetes" "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/containerinstall" "github.com/Azure/ARO-RP/pkg/database" - "github.com/Azure/ARO-RP/pkg/installer" aroclient "github.com/Azure/ARO-RP/pkg/operator/clientset/versioned" "github.com/Azure/ARO-RP/pkg/operator/deploy" "github.com/Azure/ARO-RP/pkg/util/restconfig" @@ -202,14 +202,18 @@ func (m *manager) Update(ctx context.Context) error { return m.runSteps(ctx, s, "update") } -func (m *manager) runIntegratedInstaller(ctx context.Context) error { +func (m *manager) runPodmanInstaller(ctx context.Context) error { version, err := m.openShiftVersionFromVersion(ctx) if err != nil { return err } - i := installer.NewInstaller(m.log, m.env, m.doc.ID, m.doc.OpenShiftCluster, m.subscriptionDoc.Subscription, version, m.fpAuthorizer, m.deployments, m.graph) - return i.Install(ctx) + i, err := containerinstall.New(ctx, m.log, m.env, m.doc.ID) + if err != nil { + return err + } + + return i.Install(ctx, m.subscriptionDoc, m.doc, version) } func (m *manager) runHiveInstaller(ctx context.Context) error { @@ -283,7 +287,7 @@ func (m *manager) bootstrap() []steps.Step { ) } else { s = append(s, - steps.Action(m.runIntegratedInstaller), + steps.Action(m.runPodmanInstaller), steps.Action(m.generateKubeconfigs), ) diff --git a/pkg/containerinstall/install.go b/pkg/containerinstall/install.go new file mode 100644 index 00000000000..485ac9ca6c1 --- /dev/null +++ b/pkg/containerinstall/install.go @@ -0,0 +1,216 @@ +package containerinstall + +// Copyright (c) Microsoft Corporation. +// Licensed under the Apache License 2.0. 
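+ +// containerinstall runs an OpenShift cluster installation inside a podman container: the manager pulls the installer image for the requested OpenShift version, injects the cluster and subscription documents plus the development proxy certificates as podman secrets, runs "openshift-install create manifests && openshift-install create cluster", and polls the container until it exits, collecting its logs on failure.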
+ +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "time" + + "github.com/Azure/go-autorest/autorest/to" + "github.com/containers/podman/v4/pkg/bindings/containers" + "github.com/containers/podman/v4/pkg/bindings/images" + "github.com/containers/podman/v4/pkg/bindings/secrets" + "github.com/containers/podman/v4/pkg/specgen" + "github.com/opencontainers/runtime-spec/specs-go" + + "github.com/Azure/ARO-RP/pkg/api" + "github.com/Azure/ARO-RP/pkg/util/steps" +) + +var ( + devEnvVars = []string{ + "AZURE_FP_CLIENT_ID", + "AZURE_RP_CLIENT_ID", + "AZURE_RP_CLIENT_SECRET", + "AZURE_SUBSCRIPTION_ID", + "AZURE_TENANT_ID", + "DOMAIN_NAME", + "KEYVAULT_PREFIX", + "LOCATION", + "PROXY_HOSTNAME", + "PULL_SECRET", + "RESOURCEGROUP", + } +) + +func (m *manager) Install(ctx context.Context, sub *api.SubscriptionDocument, doc *api.OpenShiftClusterDocument, version *api.OpenShiftVersion) error { + s := []steps.Step{ + steps.Action(func(context.Context) error { + options := &images.PullOptions{ + Quiet: to.BoolPtr(true), + Policy: to.StringPtr("always"), + Username: to.StringPtr(m.pullSecret.Username), + Password: to.StringPtr(m.pullSecret.Password), + } + + _, err := images.Pull(m.conn, version.Properties.InstallerPullspec, options) + return err + }), + steps.Action(func(context.Context) error { return m.createSecrets(ctx, doc, sub) }), + steps.Action(func(context.Context) error { return m.startContainer(ctx, version) }), + steps.Condition(m.containerFinished, 60*time.Minute, false), + steps.Action(m.cleanupContainers), + } + + _, err := steps.Run(ctx, m.log, 10*time.Second, s, nil) + if err != nil { + return err + } + if !m.success { + return fmt.Errorf("failed to install cluster") + } + return nil +} + +func (m *manager) putSecret(secretName string) specgen.Secret { + uid := uint32(os.Getuid()) + gid := uint32(os.Getgid()) + return specgen.Secret{ + Source: m.clusterUUID + "-" + secretName, + Target: "/.azure/" + secretName, + UID: uid, + GID: gid, + Mode: 0o644, + } +} + +func (m *manager) startContainer(ctx context.Context, version *api.OpenShiftVersion) error { + s := specgen.NewSpecGenerator(version.Properties.InstallerPullspec, false) + s.Name = "installer-" + m.clusterUUID + s.User = fmt.Sprintf("%d", os.Getuid()) + + s.Secrets = []specgen.Secret{ + m.putSecret("99_aro.json"), + m.putSecret("99_sub.json"), + m.putSecret("proxy.crt"), + m.putSecret("proxy-client.crt"), + m.putSecret("proxy-client.key"), + } + + s.Env = map[string]string{ + "ARO_RP_MODE": "development", + "ARO_UUID": m.clusterUUID, + "OPENSHIFT_INSTALL_INVOKER": "hive", + "OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE": version.Properties.OpenShiftPullspec, + } + + for _, envvar := range devEnvVars { + s.Env["ARO_"+envvar] = os.Getenv(envvar) + } + + s.Mounts = append(s.Mounts, specs.Mount{ + Destination: "/.azure", + Type: "tmpfs", + Source: "", + }) + s.WorkDir = "/.azure" + s.Entrypoint = []string{"/bin/bash", "-c", "/bin/openshift-install create manifests && /bin/openshift-install create cluster"} + + _, err := runContainer(m.conn, m.log, s) + return err +} + +func (m *manager) containerFinished(context.Context) (bool, error) { + containerName := "installer-" + m.clusterUUID + inspectData, err := containers.Inspect(m.conn, containerName, nil) + if err != nil { + return false, err + } + + if inspectData.State.Status == "exited" || inspectData.State.Status == "stopped" { + if inspectData.State.ExitCode != 0 { + getContainerLogs(m.conn, m.log, containerName) + return true, 
fmt.Errorf("container exited with %d", inspectData.State.ExitCode) + } + m.success = true + return true, nil + } + return false, nil +} + +func (m *manager) createSecrets(ctx context.Context, doc *api.OpenShiftClusterDocument, sub *api.SubscriptionDocument) error { + encCluster, err := json.Marshal(doc.OpenShiftCluster) + if err != nil { + return err + } + _, err = secrets.Create(m.conn, bytes.NewBuffer(encCluster), &secrets.CreateOptions{Name: to.StringPtr(m.clusterUUID + "-99_aro.json")}) + if err != nil { + return err + } + + encSub, err := json.Marshal(sub.Subscription) + if err != nil { + return err + } + _, err = secrets.Create(m.conn, bytes.NewBuffer(encSub), &secrets.CreateOptions{Name: to.StringPtr(m.clusterUUID + "-99_sub.json")}) + if err != nil { + return err + } + + basepath := os.Getenv("ARO_CHECKOUT_PATH") + if basepath == "" { + // This assumes we are running from an ARO-RP checkout in development + var err error + _, curmod, _, _ := runtime.Caller(0) + basepath, err = filepath.Abs(filepath.Join(filepath.Dir(curmod), "../..")) + if err != nil { + return err + } + } + + err = m.secretFromFile(filepath.Join(basepath, "secrets/proxy.crt"), "proxy.crt") + if err != nil { + return err + } + + err = m.secretFromFile(filepath.Join(basepath, "secrets/proxy-client.crt"), "proxy-client.crt") + if err != nil { + return err + } + + err = m.secretFromFile(filepath.Join(basepath, "secrets/proxy-client.key"), "proxy-client.key") + if err != nil { + return err + } + + return nil +} + +func (m *manager) secretFromFile(from, name string) error { + f, err := os.Open(from) + if err != nil { + return err + } + + _, err = secrets.Create(m.conn, f, &secrets.CreateOptions{Name: to.StringPtr(m.clusterUUID + "-" + name)}) + return err +} + +func (m *manager) cleanupContainers(ctx context.Context) error { + containerName := "installer-" + m.clusterUUID + + if !m.success { + m.log.Infof("cleaning up failed container %s", containerName) + getContainerLogs(m.conn, m.log, containerName) + } + + _, err := containers.Remove(m.conn, containerName, &containers.RemoveOptions{Force: to.BoolPtr(true), Ignore: to.BoolPtr(true)}) + if err != nil { + m.log.Errorf("unable to remove container: %v", err) + } + + for _, secretName := range []string{"99_aro.json", "99_sub.json", "proxy.crt", "proxy-client.crt", "proxy-client.key"} { + err = secrets.Remove(m.conn, m.clusterUUID+"-"+secretName) + if err != nil { + m.log.Debugf("unable to remove secret %s: %v", m.clusterUUID+"-"+secretName, err) + } + } + return nil +} diff --git a/pkg/containerinstall/install_test.go b/pkg/containerinstall/install_test.go new file mode 100644 index 00000000000..4e661f81f6e --- /dev/null +++ b/pkg/containerinstall/install_test.go @@ -0,0 +1,100 @@ +package containerinstall + +// Copyright (c) Microsoft Corporation. +// Licensed under the Apache License 2.0. + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/Azure/go-autorest/autorest/to" + "github.com/containers/podman/v4/pkg/bindings/containers" + "github.com/containers/podman/v4/pkg/bindings/images" + "github.com/containers/podman/v4/pkg/specgen" + "github.com/onsi/gomega/types" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + + "github.com/Azure/ARO-RP/pkg/util/uuid" + testlog "github.com/Azure/ARO-RP/test/util/log" +) + +const TEST_PULLSPEC = "registry.access.redhat.com/ubi8/go-toolset:1.18.4" + +var _ = Describe("Podman", Ordered, func() { + var err error + var conn context.Context + var hook *test.Hook + var log *logrus.Entry + var containerName string + var containerID string + + BeforeAll(func(ctx context.Context) { + var err error + conn, err = getConnection(ctx) + if err != nil { + Skip("unable to access podman: %v") + } + + hook, log = testlog.New() + containerName = uuid.DefaultGenerator.Generate() + }) + + It("can pull images", func() { + _, err = images.Pull(conn, TEST_PULLSPEC, &images.PullOptions{Policy: to.StringPtr("missing")}) + Expect(err).To(BeNil()) + }) + + It("can start a container", func() { + s := specgen.NewSpecGenerator(TEST_PULLSPEC, false) + s.Name = containerName + s.Entrypoint = []string{"/bin/bash", "-c", "echo 'hello'"} + + containerID, err = runContainer(conn, log, s) + Expect(err).To(BeNil()) + }) + + It("can wait for completion", func() { + exit, err := containers.Wait(conn, containerID, nil) + Expect(err).To(BeNil()) + Expect(exit).To(Equal(0), "exit code was %d, not 0", exit) + }) + + It("can fetch container logs", func() { + err = getContainerLogs(conn, log, containerID) + Expect(err).To(BeNil()) + + entries := []map[string]types.GomegaMatcher{ + { + "msg": Equal("created container " + containerName + " with ID " + containerID), + "level": Equal(logrus.InfoLevel), + }, + { + "msg": Equal("started container " + containerID), + "level": Equal(logrus.InfoLevel), + }, + { + "msg": Equal("stdout: hello\n"), + "level": Equal(logrus.InfoLevel), + }, + } + + err = testlog.AssertLoggingOutput(hook, entries) + Expect(err).To(BeNil()) + }) + + AfterAll(func() { + if containerID != "" { + _, err = containers.Remove(conn, containerID, &containers.RemoveOptions{Force: to.BoolPtr(true)}) + Expect(err).To(BeNil()) + } + }) +}) + +func TestContainerInstall(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "ContainerInstall Suite") +} diff --git a/pkg/containerinstall/manager.go b/pkg/containerinstall/manager.go new file mode 100644 index 00000000000..618488ab3ce --- /dev/null +++ b/pkg/containerinstall/manager.go @@ -0,0 +1,57 @@ +package containerinstall + +// Copyright (c) Microsoft Corporation. +// Licensed under the Apache License 2.0. 
+
+import (
+	"context"
+	"errors"
+	"os"
+
+	"github.com/sirupsen/logrus"
+
+	"github.com/Azure/ARO-RP/pkg/api"
+	"github.com/Azure/ARO-RP/pkg/env"
+	"github.com/Azure/ARO-RP/pkg/util/pullsecret"
+)
+
+type ContainerInstaller interface {
+	Install(ctx context.Context, sub *api.SubscriptionDocument, doc *api.OpenShiftClusterDocument, version *api.OpenShiftVersion) error
+}
+
+type manager struct {
+	conn context.Context
+	log  *logrus.Entry
+	env  env.Interface
+
+	clusterUUID string
+	pullSecret  *pullsecret.UserPass
+
+	success bool
+}
+
+func New(ctx context.Context, log *logrus.Entry, env env.Interface, clusterUUID string) (ContainerInstaller, error) {
+	isDevelopment := env.IsLocalDevelopmentMode()
+	if !isDevelopment {
+		return nil, errors.New("running cluster installs in a container is only supported in local development mode")
+	}
+
+	pullSecret, err := pullsecret.Extract(os.Getenv("PULL_SECRET"), env.ACRDomain())
+	if err != nil {
+		return nil, err
+	}
+
+	conn, err := getConnection(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return &manager{
+		conn: conn,
+		log:  log,
+		env:  env,
+
+		clusterUUID: clusterUUID,
+		pullSecret:  pullSecret,
+	}, nil
+}
diff --git a/pkg/containerinstall/podman.go b/pkg/containerinstall/podman.go
new file mode 100644
index 00000000000..a2cd60ae8de
--- /dev/null
+++ b/pkg/containerinstall/podman.go
@@ -0,0 +1,64 @@
+package containerinstall
+
+// Copyright (c) Microsoft Corporation.
+// Licensed under the Apache License 2.0.
+
+import (
+	"context"
+	"os"
+
+	"github.com/Azure/go-autorest/autorest/to"
+	"github.com/containers/podman/v4/pkg/bindings"
+	"github.com/containers/podman/v4/pkg/bindings/containers"
+	"github.com/containers/podman/v4/pkg/specgen"
+	"github.com/sirupsen/logrus"
+)
+
+func getConnection(ctx context.Context) (context.Context, error) {
+	socket := os.Getenv("ARO_PODMAN_SOCKET")
+
+	if socket == "" {
+		sockDir := os.Getenv("XDG_RUNTIME_DIR")
+		socket = "unix:" + sockDir + "/podman/podman.sock"
+	}
+
+	return bindings.NewConnection(ctx, socket)
+}
+
+func getContainerLogs(ctx context.Context, log *logrus.Entry, containerName string) error {
+	stdout, stderr := make(chan string, 1024), make(chan string, 1024)
+	go func() {
+		for v := range stdout {
+			log.Infof("stdout: %s", v)
+		}
+	}()
+
+	go func() {
+		for v := range stderr {
+			log.Errorf("stderr: %s", v)
+		}
+	}()
+	err := containers.Logs(
+		ctx,
+		containerName,
+		&containers.LogOptions{Stderr: to.BoolPtr(true), Stdout: to.BoolPtr(true)},
+		stdout,
+		stderr,
+	)
+	return err
+}
+
+func runContainer(ctx context.Context, log *logrus.Entry, s *specgen.SpecGenerator) (string, error) {
+	container, err := containers.CreateWithSpec(ctx, s, nil)
+	if err != nil {
+		return "", err
+	}
+	log.Infof("created container %s with ID %s", s.Name, container.ID)
+
+	err = containers.Start(ctx, container.ID, nil)
+	if err != nil {
+		return container.ID, err
+	}
+	log.Infof("started container %s", container.ID)
+	return container.ID, nil
+}
diff --git a/pkg/util/pullsecret/extract.go b/pkg/util/pullsecret/extract.go
new file mode 100644
index 00000000000..576d7af8566
--- /dev/null
+++ b/pkg/util/pullsecret/extract.go
@@ -0,0 +1,56 @@
+package pullsecret
+
+// Copyright (c) Microsoft Corporation.
+// Licensed under the Apache License 2.0.
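+
+// Illustrative example (hypothetical domain and credentials): given a
+// raw pull secret of
+//
+//	{"auths": {"example.azurecr.io": {"auth": base64("user:pass")}}}
+//
+// Extract(raw, "example.azurecr.io") returns
+// &UserPass{Username: "user", Password: "pass"}.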
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+)
+
+type UserPass struct {
+	Username string
+	Password string
+}
+
+func userPassFromBase64(secret string) (*UserPass, error) {
+	decoded, err := base64.StdEncoding.DecodeString(secret)
+	if err != nil {
+		return nil, errors.New("malformed auth token")
+	}
+
+	// Split on the first colon only: passwords may themselves contain colons.
+	split := strings.SplitN(string(decoded), ":", 2)
+	if len(split) != 2 {
+		return nil, errors.New("auth token not in format of username:password")
+	}
+
+	return &UserPass{
+		Username: split[0],
+		Password: split[1],
+	}, nil
+}
+
+// Extract decodes a username and password for a given domain from a
+// JSON-encoded pull secret (e.g. from docker auth)
+func Extract(rawPullSecret, domain string) (*UserPass, error) {
+	pullSecrets := &pullSecret{}
+	err := json.Unmarshal([]byte(rawPullSecret), pullSecrets)
+	if err != nil {
+		return nil, errors.New("malformed pullsecret (invalid JSON)")
+	}
+
+	auth, ok := pullSecrets.Auths[domain]
+	if !ok {
+		return nil, fmt.Errorf("missing '%s' key in pullsecret", domain)
+	}
+
+	// The checked assertion also covers a missing or non-string auth key
+	// without panicking.
+	token, ok := auth["auth"].(string)
+	if !ok {
+		return nil, errors.New("malformed pullsecret (no auth key)")
+	}
+
+	return userPassFromBase64(token)
+}
diff --git a/pkg/util/pullsecret/extract_test.go b/pkg/util/pullsecret/extract_test.go
new file mode 100644
index 00000000000..2745960e21d
--- /dev/null
+++ b/pkg/util/pullsecret/extract_test.go
@@ -0,0 +1,59 @@
+package pullsecret
+
+// Copyright (c) Microsoft Corporation.
+// Licensed under the Apache License 2.0.
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("Extract()", func() {
+	It("correctly decodes a pullsecret", func() {
+		pullSecret := "{\"auths\": {\"example.com\": {\"auth\": \"dGVzdHVzZXI6dGVzdHBhc3M=\"}}}"
+
+		correctlyExtracted, err := Extract(pullSecret, "example.com")
+		Expect(err).To(BeNil())
+		Expect(correctlyExtracted).To(Equal(&UserPass{Username: "testuser", Password: "testpass"}))
+	})
+
+	It("errors if no pullsecret for that name exists", func() {
+		pullSecret := "{\"auths\": {\"example.com\": {\"auth\": \"dGVzdHVzZXI6dGVzdHBhc3M=\"}}}"
+
+		_, err := Extract(pullSecret, "missingexample.com")
+		Expect(err).To(MatchError("missing 'missingexample.com' key in pullsecret"))
+	})
+
+	It("errors if the json is invalid", func() {
+		_, err := Extract("\"", "example.com")
+		Expect(err).To(MatchError("malformed pullsecret (invalid JSON)"))
+	})
+
+	It("errors if the base64 is invalid", func() {
+		pullSecret := "{\"auths\": {\"example.com\": {\"auth\": \"5\"}}}"
+
+		_, err := Extract(pullSecret, "example.com")
+		Expect(err).To(MatchError("malformed auth token"))
+	})
+
+	It("errors if the base64 does not contain a username and password", func() {
+		pullSecret := "{\"auths\": {\"example.com\": {\"auth\": \"c29tZXRoaW5nZWxzZQ==\"}}}"
+
+		_, err := Extract(pullSecret, "example.com")
+		Expect(err).To(MatchError("auth token not in format of username:password"))
+	})
+
+	It("errors if pullsecret has no auth key for domain", func() {
+		pullSecret := "{\"auths\": {\"example.com\": {\"password\": \"dGVzdHVzZXI6dGVzdHBhc3M=\"}}}"
+
+		_, err := Extract(pullSecret, "example.com")
+		Expect(err).To(MatchError("malformed pullsecret (no auth key)"))
+	})
+})
+
+func TestPullSecret(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "PullSecret Suite")
+}
diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go
new file mode 100644
index 00000000000..d39eccf0238
--- 
/dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go @@ -0,0 +1,4 @@ +// +build !windows +// This file only exists to allow go get on non-Windows platforms. + +package backuptar diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go new file mode 100644 index 00000000000..34160966399 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go @@ -0,0 +1,68 @@ +package backuptar + +import ( + "archive/tar" + "fmt" + "strconv" + "strings" + "time" +) + +// Functions copied from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go +// as we need to manage the LIBARCHIVE.creationtime PAXRecord manually. +// Idea taken from containerd which did the same thing. + +// parsePAXTime takes a string of the form %d.%d as described in the PAX +// specification. Note that this implementation allows for negative timestamps, +// which is allowed for by the PAX specification, but not always portable. +func parsePAXTime(s string) (time.Time, error) { + const maxNanoSecondDigits = 9 + + // Split string into seconds and sub-seconds parts. + ss, sn := s, "" + if pos := strings.IndexByte(s, '.'); pos >= 0 { + ss, sn = s[:pos], s[pos+1:] + } + + // Parse the seconds. + secs, err := strconv.ParseInt(ss, 10, 64) + if err != nil { + return time.Time{}, tar.ErrHeader + } + if len(sn) == 0 { + return time.Unix(secs, 0), nil // No sub-second values + } + + // Parse the nanoseconds. + if strings.Trim(sn, "0123456789") != "" { + return time.Time{}, tar.ErrHeader + } + if len(sn) < maxNanoSecondDigits { + sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad + } else { + sn = sn[:maxNanoSecondDigits] // Right truncate + } + nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed + if len(ss) > 0 && ss[0] == '-' { + return time.Unix(secs, -1*nsecs), nil // Negative correction + } + return time.Unix(secs, nsecs), nil +} + +// formatPAXTime converts ts into a time of the form %d.%d as described in the +// PAX specification. This function is capable of negative timestamps. +func formatPAXTime(ts time.Time) (s string) { + secs, nsecs := ts.Unix(), ts.Nanosecond() + if nsecs == 0 { + return strconv.FormatInt(secs, 10) + } + + // If seconds is negative, then perform correction. 
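+	// Illustrative example: ts = -1.3s is held by time.Unix()/Nanosecond()
+	// as secs = -2, nsecs = 7e8; the correction below rewrites that as
+	// sign = "-", secs = 1, nsecs = 3e8, which prints as "-1.3".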
+ sign := "" + if secs < 0 { + sign = "-" // Remember sign + secs = -(secs + 1) // Add a second to secs + nsecs = -(nsecs - 1e9) // Take that second away from nsecs + } + return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0") +} diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go new file mode 100644 index 00000000000..2342a7fcd6f --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go @@ -0,0 +1,517 @@ +// +build windows + +package backuptar + +import ( + "archive/tar" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Microsoft/go-winio" + "golang.org/x/sys/windows" +) + +const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +const ( + hdrFileAttributes = "MSWINDOWS.fileattr" + hdrSecurityDescriptor = "MSWINDOWS.sd" + hdrRawSecurityDescriptor = "MSWINDOWS.rawsd" + hdrMountPoint = "MSWINDOWS.mountpoint" + hdrEaPrefix = "MSWINDOWS.xattr." + + hdrCreationTime = "LIBARCHIVE.creationtime" +) + +// zeroReader is an io.Reader that always returns 0s. +type zeroReader struct{} + +func (zr zeroReader) Read(b []byte) (int, error) { + for i := range b { + b[i] = 0 + } + return len(b), nil +} + +func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { + curOffset := int64(0) + for { + bhdr, err := br.Next() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + return err + } + if bhdr.Id != winio.BackupSparseBlock { + return fmt.Errorf("unexpected stream %d", bhdr.Id) + } + + // We can't seek backwards, since we have already written that data to the tar.Writer. + if bhdr.Offset < curOffset { + return fmt.Errorf("cannot seek back from %d to %d", curOffset, bhdr.Offset) + } + // archive/tar does not support writing sparse files + // so just write zeroes to catch up to the current offset. + if _, err := io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil { + return fmt.Errorf("seek to offset %d: %s", bhdr.Offset, err) + } + if bhdr.Size == 0 { + // A sparse block with size = 0 is used to mark the end of the sparse blocks. + break + } + n, err := io.Copy(t, br) + if err != nil { + return err + } + if n != bhdr.Size { + return fmt.Errorf("copied %d bytes instead of %d at offset %d", n, bhdr.Size, bhdr.Offset) + } + curOffset = bhdr.Offset + n + } + return nil +} + +// BasicInfoHeader creates a tar header from basic file information. 
+func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header { + hdr := &tar.Header{ + Format: tar.FormatPAX, + Name: filepath.ToSlash(name), + Size: size, + Typeflag: tar.TypeReg, + ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()), + ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()), + AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()), + PAXRecords: make(map[string]string), + } + hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes) + hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds())) + + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + hdr.Mode |= c_ISDIR + hdr.Size = 0 + hdr.Typeflag = tar.TypeDir + } + return hdr +} + +// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file +// from the tar header and returns the security descriptor into a byte slice. +func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) { + // Maintaining old SDDL-based behavior for backward + // compatibility. All new tar headers written by this library + // will have raw binary for the security descriptor. + var sd []byte + var err error + if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { + sd, err = winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return nil, err + } + } + if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { + sd, err = base64.StdEncoding.DecodeString(sdraw) + if err != nil { + return nil, err + } + } + return sd, nil +} + +// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the +// current file from the tar header and returns it as a byte slice. +func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) { + var eas []winio.ExtendedAttribute + var eadata []byte + var err error + for k, v := range hdr.PAXRecords { + if !strings.HasPrefix(k, hdrEaPrefix) { + continue + } + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return nil, err + } + eas = append(eas, winio.ExtendedAttribute{ + Name: k[len(hdrEaPrefix):], + Value: data, + }) + } + if len(eas) != 0 { + eadata, err = winio.EncodeExtendedAttributes(eas) + if err != nil { + return nil, err + } + } + return eadata, nil +} + +// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header +// and encodes it into a byte slice. The file for which this function is called must be a +// symlink. +func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte { + _, isMountPoint := hdr.PAXRecords[hdrMountPoint] + rp := winio.ReparsePoint{ + Target: filepath.FromSlash(hdr.Linkname), + IsMountPoint: isMountPoint, + } + return winio.EncodeReparsePoint(&rp) +} + +// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream. +// +// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS. 
+// +// The additional Win32 metadata is: +// +// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value +// +// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format +// +// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink) +func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error { + name = filepath.ToSlash(name) + hdr := BasicInfoHeader(name, size, fileInfo) + + // If r can be seeked, then this function is two-pass: pass 1 collects the + // tar header data, and pass 2 copies the data stream. If r cannot be + // seeked, then some header data (in particular EAs) will be silently lost. + var ( + restartPos int64 + err error + ) + sr, readTwice := r.(io.Seeker) + if readTwice { + if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil { + readTwice = false + } + } + + br := winio.NewBackupStreamReader(r) + var dataHdr *winio.BackupHeader + for dataHdr == nil { + bhdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + switch bhdr.Id { + case winio.BackupData: + hdr.Mode |= c_ISREG + if !readTwice { + dataHdr = bhdr + } + case winio.BackupSecurity: + sd, err := ioutil.ReadAll(br) + if err != nil { + return err + } + hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd) + + case winio.BackupReparseData: + hdr.Mode |= c_ISLNK + hdr.Typeflag = tar.TypeSymlink + reparseBuffer, err := ioutil.ReadAll(br) + rp, err := winio.DecodeReparsePoint(reparseBuffer) + if err != nil { + return err + } + if rp.IsMountPoint { + hdr.PAXRecords[hdrMountPoint] = "1" + } + hdr.Linkname = rp.Target + + case winio.BackupEaData: + eab, err := ioutil.ReadAll(br) + if err != nil { + return err + } + eas, err := winio.DecodeExtendedAttributes(eab) + if err != nil { + return err + } + for _, ea := range eas { + // Use base64 encoding for the binary value. Note that there + // is no way to encode the EA's flags, since their use doesn't + // make any sense for persisted EAs. + hdr.PAXRecords[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value) + } + + case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: + // ignore these streams + default: + return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id) + } + } + + err = t.WriteHeader(hdr) + if err != nil { + return err + } + + if readTwice { + // Get back to the data stream. + if _, err = sr.Seek(restartPos, io.SeekStart); err != nil { + return err + } + for dataHdr == nil { + bhdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if bhdr.Id == winio.BackupData { + dataHdr = bhdr + } + } + } + + // The logic for copying file contents is fairly complicated due to the need for handling sparse files, + // and the weird ways they are represented by BackupRead. A normal file will always either have a data stream + // with size and content, or no data stream at all (if empty). However, for a sparse file, the content can also + // be represented using a series of sparse block streams following the data stream. Additionally, the way sparse + // files are handled by BackupRead has changed in the OS recently. The specifics of the representation are described + // in the list at the bottom of this block comment. + // + // Sparse files can be represented in four different ways, based on the specifics of the file. 
+	// - Size = 0:
+	//     Previously: BackupRead yields no data stream and no sparse block streams.
+	//     Recently: BackupRead yields a data stream with size = 0. There are no following sparse block streams.
+	// - Size > 0, no allocated ranges:
+	//     BackupRead yields a data stream with size = 0. Following is a single sparse block stream with
+	//     size = 0 and offset = <file size>.
+	// - Size > 0, one allocated range:
+	//     BackupRead yields a data stream with size = <file size> containing the file contents. There are no
+	//     sparse block streams. This is the case if you take a normal file with contents and simply set the
+	//     sparse flag on it.
+	// - Size > 0, multiple allocated ranges:
+	//     BackupRead yields a data stream with size = 0. Following are sparse block streams for each allocated
+	//     range of the file containing the range contents. Finally there is a sparse block stream with
+	//     size = 0 and offset = <file size>.
+
+	if dataHdr != nil {
+		// A data stream was found. Copy the data.
+		// We assume that we will either have a data stream size > 0 XOR have sparse block streams.
+		if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 {
+			if size != dataHdr.Size {
+				return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
+			}
+			if _, err = io.Copy(t, br); err != nil {
+				return fmt.Errorf("%s: copying contents from data stream: %s", name, err)
+			}
+		} else if size > 0 {
+			// As of a recent OS change, BackupRead now returns a data stream for empty sparse files.
+			// These files have no sparse block streams, so skip the copySparse call if file size = 0.
+			if err = copySparse(t, br); err != nil {
+				return fmt.Errorf("%s: copying contents from sparse block stream: %s", name, err)
+			}
+		}
+	}
+
+	// Look for streams after the data stream. The only ones we handle are alternate data streams.
+	// Other streams may have metadata that could be serialized, but the tar header has already
+	// been written. In practice, this means that we don't get EA or TXF metadata.
+	for {
+		bhdr, err := br.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		switch bhdr.Id {
+		case winio.BackupAlternateData:
+			altName := bhdr.Name
+			if strings.HasSuffix(altName, ":$DATA") {
+				altName = altName[:len(altName)-len(":$DATA")]
+			}
+			if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
+				hdr = &tar.Header{
+					Format:     hdr.Format,
+					Name:       name + altName,
+					Mode:       hdr.Mode,
+					Typeflag:   tar.TypeReg,
+					Size:       bhdr.Size,
+					ModTime:    hdr.ModTime,
+					AccessTime: hdr.AccessTime,
+					ChangeTime: hdr.ChangeTime,
+				}
+				err = t.WriteHeader(hdr)
+				if err != nil {
+					return err
+				}
+				_, err = io.Copy(t, br)
+				if err != nil {
+					return err
+				}
+
+			} else {
+				// Unsupported for now, since the size of the alternate stream is not present
+				// in the backup stream until after the data has been read.
+				return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name)
+			}
+		case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
+			// ignore these streams
+		default:
+			return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
+		}
+	}
+	return nil
+}
+
+// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
+// WriteTarFileFromBackupStream.
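+// If the tar header carries no MSWINDOWS.fileattr record, directories
+// fall back to FILE_ATTRIBUTE_DIRECTORY; the creation time defaults to
+// the header's ModTime unless a LIBARCHIVE.creationtime record is present.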
+func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
+	name = hdr.Name
+	if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+		size = hdr.Size
+	}
+	fileInfo = &winio.FileBasicInfo{
+		LastAccessTime: windows.NsecToFiletime(hdr.AccessTime.UnixNano()),
+		LastWriteTime:  windows.NsecToFiletime(hdr.ModTime.UnixNano()),
+		ChangeTime:     windows.NsecToFiletime(hdr.ChangeTime.UnixNano()),
+		// Default to ModTime, we'll pull hdrCreationTime below if present
+		CreationTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
+	}
+	if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok {
+		attr, err := strconv.ParseUint(attrStr, 10, 32)
+		if err != nil {
+			return "", 0, nil, err
+		}
+		fileInfo.FileAttributes = uint32(attr)
+	} else {
+		if hdr.Typeflag == tar.TypeDir {
+			fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
+		}
+	}
+	if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok {
+		creationTime, err := parsePAXTime(creationTimeStr)
+		if err != nil {
+			return "", 0, nil, err
+		}
+		fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano())
+	}
+	return
+}
+
+// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
+// tar file entries in order to collect all the alternate data streams for the file, it returns the next
+// tar file that was not processed, or io.EOF if there are no more.
+func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
+	bw := winio.NewBackupStreamWriter(w)
+
+	sd, err := SecurityDescriptorFromTarHeader(hdr)
+	if err != nil {
+		return nil, err
+	}
+	if len(sd) != 0 {
+		bhdr := winio.BackupHeader{
+			Id:   winio.BackupSecurity,
+			Size: int64(len(sd)),
+		}
+		err := bw.WriteHeader(&bhdr)
+		if err != nil {
+			return nil, err
+		}
+		_, err = bw.Write(sd)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	eadata, err := ExtendedAttributesFromTarHeader(hdr)
+	if err != nil {
+		return nil, err
+	}
+	if len(eadata) != 0 {
+		bhdr := winio.BackupHeader{
+			Id:   winio.BackupEaData,
+			Size: int64(len(eadata)),
+		}
+		err = bw.WriteHeader(&bhdr)
+		if err != nil {
+			return nil, err
+		}
+		_, err = bw.Write(eadata)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if hdr.Typeflag == tar.TypeSymlink {
+		reparse := EncodeReparsePointFromTarHeader(hdr)
+		bhdr := winio.BackupHeader{
+			Id:   winio.BackupReparseData,
+			Size: int64(len(reparse)),
+		}
+		err := bw.WriteHeader(&bhdr)
+		if err != nil {
+			return nil, err
+		}
+		_, err = bw.Write(reparse)
+		if err != nil {
+			return nil, err
+		}
+
+	}
+
+	if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
+		bhdr := winio.BackupHeader{
+			Id:   winio.BackupData,
+			Size: hdr.Size,
+		}
+		err := bw.WriteHeader(&bhdr)
+		if err != nil {
+			return nil, err
+		}
+		_, err = io.Copy(bw, t)
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Copy all the alternate data streams and return the next non-ADS header.
+ for { + ahdr, err := t.Next() + if err != nil { + return nil, err + } + if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { + return ahdr, nil + } + bhdr := winio.BackupHeader{ + Id: winio.BackupAlternateData, + Size: ahdr.Size, + Name: ahdr.Name[len(hdr.Name):] + ":$DATA", + } + err = bw.WriteHeader(&bhdr) + if err != nil { + return nil, err + } + _, err = io.Copy(bw, t) + if err != nil { + return nil, err + } + } +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go new file mode 100644 index 00000000000..602920786c9 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go @@ -0,0 +1,160 @@ +// +build windows + +package security + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +type ( + accessMask uint32 + accessMode uint32 + desiredAccess uint32 + inheritMode uint32 + objectType uint32 + shareMode uint32 + securityInformation uint32 + trusteeForm uint32 + trusteeType uint32 + + explicitAccess struct { + accessPermissions accessMask + accessMode accessMode + inheritance inheritMode + trustee trustee + } + + trustee struct { + multipleTrustee *trustee + multipleTrusteeOperation int32 + trusteeForm trusteeForm + trusteeType trusteeType + name uintptr + } +) + +const ( + accessMaskDesiredPermission accessMask = 1 << 31 // GENERIC_READ + + accessModeGrant accessMode = 1 + + desiredAccessReadControl desiredAccess = 0x20000 + desiredAccessWriteDac desiredAccess = 0x40000 + + gvmga = "GrantVmGroupAccess:" + + inheritModeNoInheritance inheritMode = 0x0 + inheritModeSubContainersAndObjectsInherit inheritMode = 0x3 + + objectTypeFileObject objectType = 0x1 + + securityInformationDACL securityInformation = 0x4 + + shareModeRead shareMode = 0x1 + shareModeWrite shareMode = 0x2 + + sidVmGroup = "S-1-5-83-0" + + trusteeFormIsSid trusteeForm = 0 + + trusteeTypeWellKnownGroup trusteeType = 5 +) + +// GrantVMGroupAccess sets the DACL for a specified file or directory to +// include Grant ACE entries for the VM Group SID. This is a golang re- +// implementation of the same function in vmcompute, just not exported in +// RS5. Which kind of sucks. Sucks a lot :/ +func GrantVmGroupAccess(name string) error { + // Stat (to determine if `name` is a directory). + s, err := os.Stat(name) + if err != nil { + return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err) + } + + // Get a handle to the file/directory. Must defer Close on success. + fd, err := createFile(name, s.IsDir()) + if err != nil { + return err // Already wrapped + } + defer syscall.CloseHandle(fd) + + // Get the current DACL and Security Descriptor. Must defer LocalFree on success. + ot := objectTypeFileObject + si := securityInformationDACL + sd := uintptr(0) + origDACL := uintptr(0) + if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { + return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err) + } + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) + + // Generate a new DACL which is the current DACL with the required ACEs added. + // Must defer LocalFree on success. + newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), origDACL) + if err != nil { + return err // Already wrapped + } + defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) + + // And finally use SetSecurityInfo to apply the updated DACL. 
+ if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { + return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err) + } + + return nil +} + +// createFile is a helper function to call [Nt]CreateFile to get a handle to +// the file or directory. +func createFile(name string, isDir bool) (syscall.Handle, error) { + namep := syscall.StringToUTF16(name) + da := uint32(desiredAccessReadControl | desiredAccessWriteDac) + sm := uint32(shareModeRead | shareModeWrite) + fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL) + if isDir { + fa = uint32(fa | syscall.FILE_FLAG_BACKUP_SEMANTICS) + } + fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0) + if err != nil { + return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err) + } + return fd, nil +} + +// generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added. +// The caller is responsible for LocalFree of the returned DACL on success. +func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) { + // Generate pointers to the SIDs based on the string SIDs + sid, err := syscall.StringToSid(sidVmGroup) + if err != nil { + return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err) + } + + inheritance := inheritModeNoInheritance + if isDir { + inheritance = inheritModeSubContainersAndObjectsInherit + } + + eaArray := []explicitAccess{ + explicitAccess{ + accessPermissions: accessMaskDesiredPermission, + accessMode: accessModeGrant, + inheritance: inheritance, + trustee: trustee{ + trusteeForm: trusteeFormIsSid, + trusteeType: trusteeTypeWellKnownGroup, + name: uintptr(unsafe.Pointer(sid)), + }, + }, + } + + modifiedDACL := uintptr(0) + if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil { + return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err) + } + + return modifiedDACL, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go new file mode 100644 index 00000000000..d7096716ce2 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go @@ -0,0 +1,7 @@ +package security + +//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go + +//sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo +//sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo +//sys setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) = advapi32.SetEntriesInAclW diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go new file mode 100644 index 00000000000..4084680e0f0 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go @@ -0,0 +1,70 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package security + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. 
+const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") +) + +func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) { + r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go new file mode 100644 index 00000000000..f7f78fc2304 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -0,0 +1,350 @@ +//go:build windows +// +build windows + +package vhd + +import ( + "fmt" + "syscall" + + "github.com/Microsoft/go-winio/pkg/guid" + "golang.org/x/sys/windows" +) + +//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go + +//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk +//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk +//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk +//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, 
providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk +//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath + +type ( + CreateVirtualDiskFlag uint32 + VirtualDiskFlag uint32 + AttachVirtualDiskFlag uint32 + DetachVirtualDiskFlag uint32 + VirtualDiskAccessMask uint32 +) + +type VirtualStorageType struct { + DeviceID uint32 + VendorID guid.GUID +} + +type CreateVersion2 struct { + UniqueID guid.GUID + MaximumSize uint64 + BlockSizeInBytes uint32 + SectorSizeInBytes uint32 + PhysicalSectorSizeInByte uint32 + ParentPath *uint16 // string + SourcePath *uint16 // string + OpenFlags uint32 + ParentVirtualStorageType VirtualStorageType + SourceVirtualStorageType VirtualStorageType + ResiliencyGUID guid.GUID +} + +type CreateVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 CreateVersion2 +} + +type OpenVersion2 struct { + GetInfoOnly bool + ReadOnly bool + ResiliencyGUID guid.GUID +} + +type OpenVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 OpenVersion2 +} + +// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However, +// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating +// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods. +type openVersion2 struct { + getInfoOnly int32 + readOnly int32 + resiliencyGUID guid.GUID +} + +type openVirtualDiskParameters struct { + version uint32 + version2 openVersion2 +} + +type AttachVersion2 struct { + RestrictedOffset uint64 + RestrictedLength uint64 +} + +type AttachVirtualDiskParameters struct { + Version uint32 + Version2 AttachVersion2 +} + +const ( + VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3 + + // Access Mask for opening a VHD + VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000 + VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000 + VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000 + VirtualDiskAccessDetach VirtualDiskAccessMask = 0x00040000 + VirtualDiskAccessGetInfo VirtualDiskAccessMask = 0x00080000 + VirtualDiskAccessCreate VirtualDiskAccessMask = 0x00100000 + VirtualDiskAccessMetaOps VirtualDiskAccessMask = 0x00200000 + VirtualDiskAccessRead VirtualDiskAccessMask = 0x000d0000 + VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000 + VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000 + + // Flags for creating a VHD + CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0 + CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1 + CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2 + CreateVirtualDiskFlagDoNotCopyMetadataFromParent CreateVirtualDiskFlag = 0x4 + CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8 + CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10 + CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20 + CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 + CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80 + CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 + CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200 + + // Flags for opening a VHD + OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000 + OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001 
+ OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002 + OpenVirtualDiskFlagBootDrive VirtualDiskFlag = 0x00000004 + OpenVirtualDiskFlagCachedIO VirtualDiskFlag = 0x00000008 + OpenVirtualDiskFlagCustomDiffChain VirtualDiskFlag = 0x00000010 + OpenVirtualDiskFlagParentCachedIO VirtualDiskFlag = 0x00000020 + OpenVirtualDiskFlagVhdsetFileOnly VirtualDiskFlag = 0x00000040 + OpenVirtualDiskFlagIgnoreRelativeParentLocator VirtualDiskFlag = 0x00000080 + OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100 + OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200 + + // Flags for attaching a VHD + AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000 + AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001 + AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002 + AttachVirtualDiskFlagPermanentLifetime AttachVirtualDiskFlag = 0x00000004 + AttachVirtualDiskFlagNoLocalHost AttachVirtualDiskFlag = 0x00000008 + AttachVirtualDiskFlagNoSecurityDescriptor AttachVirtualDiskFlag = 0x00000010 + AttachVirtualDiskFlagBypassDefaultEncryptionPolicy AttachVirtualDiskFlag = 0x00000020 + AttachVirtualDiskFlagNonPnp AttachVirtualDiskFlag = 0x00000040 + AttachVirtualDiskFlagRestrictedRange AttachVirtualDiskFlag = 0x00000080 + AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100 + AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200 + + // Flags for detaching a VHD + DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0 +) + +// CreateVhdx is a helper function to create a simple vhdx file at the given path using +// default values. +func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { + params := CreateVirtualDiskParameters{ + Version: 2, + Version2: CreateVersion2{ + MaximumSize: uint64(maxSizeInGb) * 1024 * 1024 * 1024, + BlockSizeInBytes: blockSizeInMb * 1024 * 1024, + }, + } + + handle, err := CreateVirtualDisk(path, VirtualDiskAccessNone, CreateVirtualDiskFlagNone, ¶ms) + if err != nil { + return err + } + + return syscall.CloseHandle(handle) +} + +// DetachVirtualDisk detaches a virtual hard disk by handle. +func DetachVirtualDisk(handle syscall.Handle) (err error) { + if err := detachVirtualDisk(handle, 0, 0); err != nil { + return fmt.Errorf("failed to detach virtual disk: %w", err) + } + return nil +} + +// DetachVhd detaches a vhd found at `path`. +func DetachVhd(path string) error { + handle, err := OpenVirtualDisk( + path, + VirtualDiskAccessNone, + OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, + ) + if err != nil { + return err + } + defer syscall.CloseHandle(handle) + return DetachVirtualDisk(handle) +} + +// AttachVirtualDisk attaches a virtual hard disk for use. +func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) { + // Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5. + if err := attachVirtualDisk( + handle, + nil, + uint32(attachVirtualDiskFlag), + 0, + parameters, + nil, + ); err != nil { + return fmt.Errorf("failed to attach virtual disk: %w", err) + } + return nil +} + +// AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2 +// of the ATTACH_VIRTUAL_DISK_PARAMETERS. 
+func AttachVhd(path string) (err error) { + handle, err := OpenVirtualDisk( + path, + VirtualDiskAccessNone, + OpenVirtualDiskFlagCachedIO|OpenVirtualDiskFlagIgnoreRelativeParentLocator, + ) + if err != nil { + return err + } + + defer syscall.CloseHandle(handle) + params := AttachVirtualDiskParameters{Version: 2} + if err := AttachVirtualDisk( + handle, + AttachVirtualDiskFlagNone, + ¶ms, + ); err != nil { + return fmt.Errorf("failed to attach virtual disk: %w", err) + } + return nil +} + +// OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags. +func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) { + parameters := OpenVirtualDiskParameters{Version: 2} + handle, err := OpenVirtualDiskWithParameters( + vhdPath, + virtualDiskAccessMask, + openVirtualDiskFlags, + ¶meters, + ) + if err != nil { + return 0, err + } + return handle, nil +} + +// OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters. +func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) { + var ( + handle syscall.Handle + defaultType VirtualStorageType + getInfoOnly int32 + readOnly int32 + ) + if parameters.Version != 2 { + return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) + } + if parameters.Version2.GetInfoOnly { + getInfoOnly = 1 + } + if parameters.Version2.ReadOnly { + readOnly = 1 + } + params := &openVirtualDiskParameters{ + version: parameters.Version, + version2: openVersion2{ + getInfoOnly, + readOnly, + parameters.Version2.ResiliencyGUID, + }, + } + if err := openVirtualDisk( + &defaultType, + vhdPath, + uint32(virtualDiskAccessMask), + uint32(openVirtualDiskFlags), + params, + &handle, + ); err != nil { + return 0, fmt.Errorf("failed to open virtual disk: %w", err) + } + return handle, nil +} + +// CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk. +func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) { + var ( + handle syscall.Handle + defaultType VirtualStorageType + ) + if parameters.Version != 2 { + return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) + } + + if err := createVirtualDisk( + &defaultType, + path, + uint32(virtualDiskAccessMask), + nil, + uint32(createVirtualDiskFlags), + 0, + parameters, + nil, + &handle, + ); err != nil { + return handle, fmt.Errorf("failed to create virtual disk: %w", err) + } + return handle, nil +} + +// GetVirtualDiskPhysicalPath takes a handle to a virtual hard disk and returns the physical +// path of the disk on the machine. This path is in the form \\.\PhysicalDriveX where X is an integer +// that represents the particular enumeration of the physical disk on the caller's system. 
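+// For example, the third enumerated physical disk on the machine is
+// returned as \\.\PhysicalDrive2.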
+func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { + var ( + diskPathSizeInBytes uint32 = 256 * 2 // max path length 256 wide chars + diskPhysicalPathBuf [256]uint16 + ) + if err := getVirtualDiskPhysicalPath( + handle, + &diskPathSizeInBytes, + &diskPhysicalPathBuf[0], + ); err != nil { + return "", fmt.Errorf("failed to get disk physical path: %w", err) + } + return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil +} + +// CreateDiffVhd is a helper function to create a differencing virtual disk. +func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error { + // Setting `ParentPath` is how to signal to create a differencing disk. + createParams := &CreateVirtualDiskParameters{ + Version: 2, + Version2: CreateVersion2{ + ParentPath: windows.StringToUTF16Ptr(baseVhdPath), + BlockSizeInBytes: blockSizeInMB * 1024 * 1024, + OpenFlags: uint32(OpenVirtualDiskFlagCachedIO), + }, + } + + vhdHandle, err := CreateVirtualDisk( + diffVhdPath, + VirtualDiskAccessNone, + CreateVirtualDiskFlagNone, + createParams, + ) + if err != nil { + return fmt.Errorf("failed to create differencing vhd: %w", err) + } + if err := syscall.CloseHandle(vhdHandle); err != nil { + return fmt.Errorf("failed to close differencing vhd handle: %w", err) + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go new file mode 100644 index 00000000000..1d7498db3be --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go @@ -0,0 +1,106 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package vhd + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") + + procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") + procCreateVirtualDisk = modvirtdisk.NewProc("CreateVirtualDisk") + procDetachVirtualDisk = modvirtdisk.NewProc("DetachVirtualDisk") + procGetVirtualDiskPhysicalPath = modvirtdisk.NewProc("GetVirtualDiskPhysicalPath") + procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") +) + +func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) { + r0, _, _ := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(path) + if win32err != nil { + return + } + return _createVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, securityDescriptor, createVirtualDiskFlags, providerSpecificFlags, parameters, overlapped, handle) +} + +func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { + r0, _, _ := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) { + r0, _, _ := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) { + r0, _, _ := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(path) + if win32err != nil { + return + } + return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle) +} + +func _openVirtualDisk(virtualStorageType *VirtualStorageType, 
path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { + r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/.gitattributes b/vendor/github.com/Microsoft/hcsshim/.gitattributes new file mode 100644 index 00000000000..94f480de94e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore new file mode 100644 index 00000000000..54ed6f06c9d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.gitignore @@ -0,0 +1,38 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Ignore vscode setting files +.vscode/ + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# Ignore gcs bin directory +service/bin/ +service/pkg/ + +*.img +*.vhd +*.tar.gz + +# Make stuff +.rootfs-done +bin/* +rootfs/* +*.o +/build/ + +deps/* +out/* + +.idea/ +.vscode/ \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml new file mode 100644 index 00000000000..2400e7f1e02 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -0,0 +1,99 @@ +run: + timeout: 8m + +linters: + enable: + - stylecheck + +linters-settings: + stylecheck: + # https://staticcheck.io/docs/checks + checks: ["all"] + + +issues: + # This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like + # (screaming case Windows api constants for example). There's also some structs that we *could* change the initialisms to be Go + # friendly (Id -> ID) but they're exported and it would be a breaking change. This makes it so that most new code, code that isn't + # supposed to be a pretty faithful mapping to an OS call/constants, or non-generated code still checks if we're following idioms, + # while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change. 
+ exclude-rules: + - path: layer.go + linters: + - stylecheck + Text: "ST1003:" + + - path: hcsshim.go + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema2\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\wclayer\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: hcn\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema1\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hns\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\compactext4\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\format\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guestrequest\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guest\\prot\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\windevice\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\winapi\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\vmcompute\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\regstate\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcserror\\ + linters: + - stylecheck + Text: "ST1003:" \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/CODEOWNERS b/vendor/github.com/Microsoft/hcsshim/CODEOWNERS new file mode 100644 index 00000000000..f4c5a07d14b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/CODEOWNERS @@ -0,0 +1 @@ +* @microsoft/containerplat \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/LICENSE b/vendor/github.com/Microsoft/hcsshim/LICENSE new file mode 100644 index 00000000000..49d21669aee --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile new file mode 100644 index 00000000000..a8f5516cd0b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -0,0 +1,87 @@ +BASE:=base.tar.gz + +GO:=go +GO_FLAGS:=-ldflags "-s -w" # strip Go binaries +CGO_ENABLED:=0 +GOMODVENDOR:= + +CFLAGS:=-O2 -Wall +LDFLAGS:=-static -s # strip C binaries + +GO_FLAGS_EXTRA:= +ifeq "$(GOMODVENDOR)" "1" +GO_FLAGS_EXTRA += -mod=vendor +endif +GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA) + +SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST)))) + +# The link aliases for gcstools +GCS_TOOLS=\ + generichook + +.PHONY: all always rootfs test + +all: out/initrd.img out/rootfs.tar.gz + +clean: + find -name '*.o' -print0 | xargs -0 -r rm + rm -rf bin deps rootfs out + +test: + cd $(SRCROOT) && go test -v ./internal/guest/... + +out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools Makefile + @mkdir -p out + rm -rf rootfs + mkdir -p rootfs/bin/ + cp bin/init rootfs/ + cp bin/vsockexec rootfs/bin/ + cp bin/cmd/gcs rootfs/bin/ + cp bin/cmd/gcstools rootfs/bin/ + for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done + git -C $(SRCROOT) rev-parse HEAD > rootfs/gcs.commit && \ + git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/gcs.branch + tar -zcf $@ -C rootfs . + rm -rf rootfs + +out/rootfs.tar.gz: out/initrd.img + rm -rf rootfs-conv + mkdir rootfs-conv + gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd) + tar -zcf $@ -C rootfs-conv . + rm -rf rootfs-conv + +out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh + $(SRCROOT)/hack/catcpio.sh "$(BASE)" out/delta.tar.gz > out/initrd.img.uncompressed + gzip -c out/initrd.img.uncompressed > $@ + rm out/initrd.img.uncompressed + +-include deps/cmd/gcs.gomake +-include deps/cmd/gcstools.gomake + +# Implicit rule for includes that define Go targets. +%.gomake: $(SRCROOT)/Makefile + @mkdir -p $(dir $@) + @/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new + @/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new + @/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new + @/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new + @/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new + @/bin/echo -e '\tmv $$@.new $$@' >> $@.new + @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new + mv $@.new $@ + +VPATH=$(SRCROOT) + +bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o + @mkdir -p bin + $(CC) $(LDFLAGS) -o $@ $^ + +bin/init: init/init.o vsockexec/vsock.o + @mkdir -p bin + $(CC) $(LDFLAGS) -o $@ $^ + +%.o: %.c + @mkdir -p $(dir $@) + $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< diff --git a/vendor/github.com/Microsoft/hcsshim/Protobuild.toml b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml new file mode 100644 index 00000000000..ee18671aa64 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml @@ -0,0 +1,49 @@ +version = "unstable" +generator = "gogoctrd" +plugins = ["grpc", "fieldpath"] + +# Control protoc include paths. Below are usually some good defaults, but feel +# free to try it without them if it works for your project. +[includes] + # Include paths that will be added before all others. Typically, you want to + # treat the root of the project as an include, but this may not be necessary. 
+ before = ["./protobuf"] + + # Paths that should be treated as include roots in relation to the vendor + # directory. These will be calculated with the vendor directory nearest the + # target package. + packages = ["github.com/gogo/protobuf"] + + # Paths that will be added untouched to the end of the includes. We use + # `/usr/local/include` to pickup the common install location of protobuf. + # This is the default. + after = ["/usr/local/include"] + +# This section maps protobuf imports to Go packages. These will become +# `-M` directives in the call to the go protobuf generator. +[packages] + "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto" + "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/empty.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/struct.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types" + "github/containerd/cgroups/stats/v1/metrics.proto" = "github.com/containerd/cgroups/stats/v1" + +[[overrides]] +prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"] +plugins = ["ttrpc"] + +[[overrides]] +prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"] +plugins = ["ttrpc"] + +[[overrides]] +prefixes = ["github.com/Microsoft/hcsshim/internal/ncproxyttrpc"] +plugins = ["ttrpc"] + +[[overrides]] +prefixes = ["github.com/Microsoft/hcsshim/internal/vmservice"] +plugins = ["ttrpc"] \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md new file mode 100644 index 00000000000..b8ca926a9da --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/README.md @@ -0,0 +1,120 @@ +# hcsshim + +[![Build status](https://github.com/microsoft/hcsshim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster) + +This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS), as well as code for the [guest agent](./internal/guest/README.md) (commonly referred to as the GCS or Guest Compute Service in the codebase) used to support running Linux Hyper-V containers. + +It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd](https://github.com/containerd/containerd) projects, but it can be freely used by other projects as well. + +## Building + +While this repository can be used as a library of sorts to call the HCS apis, there are a couple binaries built out of the repository as well. The main ones being the Linux guest agent, and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md). +### Linux Hyper-V Container Guest Agent + +To build the Linux guest agent itself all that's needed is to set your GOOS to "Linux" and build out of ./cmd/gcs. 
+```powershell
+C:\> $env:GOOS="linux"
+C:\> go build .\cmd\gcs\
+```
+
+or on a Linux machine
+```sh
+> go build ./cmd/gcs
+```
+
+If you want to package it inside a rootfs to boot alongside all of the other tools, you'll need to provide a rootfs to package it into. An easy way is to export the rootfs of a container.
+
+```sh
+docker pull busybox
+docker run --name base_image_container busybox
+docker export base_image_container | gzip > base.tar.gz
+BASE=./base.tar.gz
+make all
+```
+
+If the build is successful, in the `./out` folder you should see:
+```sh
+> ls ./out/
+delta.tar.gz initrd.img rootfs.tar.gz
+```
+
+### Containerd Shim
+For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md.
+
+Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers.
+
+```powershell
+C:\> $env:GOOS="windows"
+C:\> go build .\cmd\containerd-shim-runhcs-v1
+```
+
+Then place the binary in the same directory as Containerd in your environment. A default Containerd configuration file can be generated by running:
+```powershell
+.\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii
+```
+
+This config file will already have the shim set as the default runtime for CRI interactions.
+
+To try out the shim with ctr.exe:
+```powershell
+C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!"
+```
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to
+certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for
+more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure
+that all commits in a given PR are signed-off.
+
+### Test Directory (Important to note)
+
+This project has tried to trim some dependencies from the root Go modules file that would be cumbersome to get transitively included if this
+project is being vendored/used as a library. Some of these dependencies were only being used for tests, so the /test directory in this project also has
+its own go.mod file where these are now included to get around this issue. Our tests rely on the code in this project to run, so the test Go modules file
+has a relative path replace directive to pull in the latest hcsshim code that the tests actually touch from this project
+(which is the repo itself on your disk).
+
+```
+replace (
+    github.com/Microsoft/hcsshim => ../
+)
+```
+
+Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the
+CI in this project will check that those files are up to date and will fail if they are not.
+
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Dependencies
+
+This project requires Golang 1.9 or newer to build.
+
+For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements).
+
+## Reporting Security Issues
+
+Security issues and bugs should be reported privately, via email, to the Microsoft Security
+Response Center (MSRC) at [secure@microsoft.com](mailto:secure@microsoft.com). You should
+receive a response within 24 hours. If for some reason you do not, please follow up via
+email to ensure we received your original message. Further information, including the
+[MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155) key, can be found in
+the [Security TechCenter](https://technet.microsoft.com/en-us/security/default).
+
+For additional details, see [Report a Computer Security Vulnerability](https://technet.microsoft.com/en-us/security/ff852094.aspx) on Technet.
+
+---------------
+Copyright (c) 2018 Microsoft Corp. All rights reserved.
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
new file mode 100644
index 00000000000..7f1f2823ddc
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go
@@ -0,0 +1,38 @@
+package computestorage
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// AttachLayerStorageFilter sets up the layer storage filter on a writable
+// container layer.
+//
+// `layerPath` is a path to the directory where the writable layer is mounted. If the
+// path does not end in a `\` the platform will append it automatically.
+//
+// `layerData` is the parent read-only layer data.
+func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) {
+	title := "hcsshim.AttachLayerStorageFilter"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("layerPath", layerPath),
+	)
+
+	bytes, err := json.Marshal(layerData)
+	if err != nil {
+		return err
+	}
+
+	err = hcsAttachLayerStorageFilter(layerPath, string(bytes))
+	if err != nil {
+		return errors.Wrap(err, "failed to attach layer storage filter")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go
new file mode 100644
index 00000000000..8e28e6c5046
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go
@@ -0,0 +1,26 @@
+package computestorage
+
+import (
+	"context"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// DestroyLayer deletes a container layer.
+//
+// `layerPath` is a path to a directory containing the layer to destroy.
+func DestroyLayer(ctx context.Context, layerPath string) (err error) {
+	title := "hcsshim.DestroyLayer"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(trace.StringAttribute("layerPath", layerPath))
+
+	err = hcsDestroyLayer(layerPath)
+	if err != nil {
+		return errors.Wrap(err, "failed to destroy layer")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
new file mode 100644
index 00000000000..435473257e3
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go
@@ -0,0 +1,26 @@
+package computestorage
+
+import (
+	"context"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// DetachLayerStorageFilter detaches the layer storage filter on a writable container layer.
+//
+// `layerPath` is a path to a directory containing the writable layer.
+func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) {
+	title := "hcsshim.DetachLayerStorageFilter"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(trace.StringAttribute("layerPath", layerPath))
+
+	err = hcsDetachLayerStorageFilter(layerPath)
+	if err != nil {
+		return errors.Wrap(err, "failed to detach layer storage filter")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/export.go b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go
new file mode 100644
index 00000000000..a1b12dd1292
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go
@@ -0,0 +1,46 @@
+package computestorage
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// ExportLayer exports a container layer.
+//
+// `layerPath` is a path to a directory containing the layer to export.
+//
+// `exportFolderPath` is a pre-existing folder to export the layer to.
+//
+// `layerData` is the parent layer data.
+//
+// `options` are the export options applied to the exported layer.
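The attach/detach/destroy wrappers above all follow the same shape: marshal the request to JSON, call the `computestorage.dll` export, and wrap any failure. A minimal sketch of pairing attach and detach follows; the paths are assumptions, and `LayerData` embeds types from an internal schema package, so as written this would only compile inside the hcsshim module:

```go
package main

import (
	"context"
	"log"

	"github.com/Microsoft/hcsshim/computestorage"
	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" // internal: in-module use only
)

func main() {
	ctx := context.Background()
	scratch := `C:\layers\scratch` // hypothetical writable layer directory

	// Parent read-only layers, topmost first; the path is illustrative.
	data := computestorage.LayerData{
		Layers: []hcsschema.Layer{{Path: `C:\layers\base`}},
	}

	if err := computestorage.AttachLayerStorageFilter(ctx, scratch, data); err != nil {
		log.Fatalf("attach failed: %v", err)
	}
	defer func() {
		// Detach again so the layer can later be destroyed or reused.
		if err := computestorage.DetachLayerStorageFilter(ctx, scratch); err != nil {
			log.Printf("detach failed: %v", err)
		}
	}()
}
```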
+func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) {
+	title := "hcsshim.ExportLayer"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("layerPath", layerPath),
+		trace.StringAttribute("exportFolderPath", exportFolderPath),
+	)
+
+	ldbytes, err := json.Marshal(layerData)
+	if err != nil {
+		return err
+	}
+
+	obytes, err := json.Marshal(options)
+	if err != nil {
+		return err
+	}
+
+	err = hcsExportLayer(layerPath, exportFolderPath, string(ldbytes), string(obytes))
+	if err != nil {
+		return errors.Wrap(err, "failed to export layer")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/format.go b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go
new file mode 100644
index 00000000000..83c0fa33f0f
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go
@@ -0,0 +1,26 @@
+package computestorage
+
+import (
+	"context"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+	"golang.org/x/sys/windows"
+)
+
+// FormatWritableLayerVhd formats a virtual disk for use as a writable container layer.
+//
+// If the VHD is not mounted it will be temporarily mounted.
+func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) {
+	title := "hcsshim.FormatWritableLayerVhd"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+
+	err = hcsFormatWritableLayerVhd(vhdHandle)
+	if err != nil {
+		return errors.Wrap(err, "failed to format writable layer vhd")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go
new file mode 100644
index 00000000000..87fee452cd3
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go
@@ -0,0 +1,193 @@
+package computestorage
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/Microsoft/go-winio/pkg/security"
+	"github.com/Microsoft/go-winio/vhd"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/windows"
+)
+
+const defaultVHDXBlockSizeInMB = 1
+
+// SetupContainerBaseLayer is a helper to set up a container's scratch space. It
+// will create and format the vhdxs inside, and the size is configurable with the sizeInGB
+// parameter.
+//
+// `layerPath` is the path to the base container layer on disk.
+//
+// `baseVhdPath` is the path to where the base vhdx for the base layer should be created.
+//
+// `diffVhdPath` is the path where the differencing disk for the base layer should be created.
+//
+// `sizeInGB` is the size in gigabytes to make the base vhdx.
+func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) {
+	var (
+		hivesPath  = filepath.Join(layerPath, "Hives")
+		layoutPath = filepath.Join(layerPath, "Layout")
+	)
+
+	// We need to remove the hives directory and layout file as `SetupBaseOSLayer` fails if these files
+	// already exist. `SetupBaseOSLayer` will create these files internally. We also remove the base and
+	// differencing disks if they exist in case we're asking for a different size.
+	if _, err := os.Stat(hivesPath); err == nil {
+		if err := os.RemoveAll(hivesPath); err != nil {
+			return errors.Wrap(err, "failed to remove preexisting hives directory")
+		}
+	}
+	if _, err := os.Stat(layoutPath); err == nil {
+		if err := os.RemoveAll(layoutPath); err != nil {
+			return errors.Wrap(err, "failed to remove preexisting layout file")
+		}
+	}
+
+	if _, err := os.Stat(baseVhdPath); err == nil {
+		if err := os.RemoveAll(baseVhdPath); err != nil {
+			return errors.Wrap(err, "failed to remove base vhdx path")
+		}
+	}
+	if _, err := os.Stat(diffVhdPath); err == nil {
+		if err := os.RemoveAll(diffVhdPath); err != nil {
+			return errors.Wrap(err, "failed to remove differencing vhdx")
+		}
+	}
+
+	createParams := &vhd.CreateVirtualDiskParameters{
+		Version: 2,
+		Version2: vhd.CreateVersion2{
+			MaximumSize:      sizeInGB * 1024 * 1024 * 1024,
+			BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024,
+		},
+	}
+	handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams)
+	if err != nil {
+		return errors.Wrap(err, "failed to create vhdx")
+	}
+
+	defer func() {
+		if err != nil {
+			_ = syscall.CloseHandle(handle)
+			os.RemoveAll(baseVhdPath)
+			os.RemoveAll(diffVhdPath)
+		}
+	}()
+
+	if err = FormatWritableLayerVhd(ctx, windows.Handle(handle)); err != nil {
+		return err
+	}
+	// Base vhd handle must be closed before calling SetupBaseLayer in case of Container layer
+	if err = syscall.CloseHandle(handle); err != nil {
+		return errors.Wrap(err, "failed to close vhdx handle")
+	}
+
+	options := OsLayerOptions{
+		Type: OsLayerTypeContainer,
+	}
+
+	// SetupBaseOSLayer expects an empty vhd handle for a container layer and will
+	// error out otherwise.
+	if err = SetupBaseOSLayer(ctx, layerPath, 0, options); err != nil {
+		return err
+	}
+	// Create the differencing disk that will be what's copied for the final rw layer
+	// for a container.
+	if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil {
+		return errors.Wrap(err, "failed to create differencing disk")
+	}
+
+	if err = security.GrantVmGroupAccess(baseVhdPath); err != nil {
+		return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath)
+	}
+	if err = security.GrantVmGroupAccess(diffVhdPath); err != nil {
+		return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath)
+	}
+	return nil
+}
+
+// SetupUtilityVMBaseLayer is a helper to set up a UVM's scratch space. It will create and format
+// the vhdx inside, and the size is configurable with the sizeInGB parameter.
+//
+// `uvmPath` is the path to the UtilityVM filesystem.
+//
+// `baseVhdPath` is the path to where the base vhdx for the UVM should be created.
+//
+// `diffVhdPath` is the path where the differencing disk for the UVM should be created.
+//
+// `sizeInGB` specifies the size in gigabytes to make the base vhdx.
+func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdPath string, sizeInGB uint64) (err error) {
+	// Remove the base and differencing disks if they exist in case we're asking for a different size.
+	if _, err := os.Stat(baseVhdPath); err == nil {
+		if err := os.RemoveAll(baseVhdPath); err != nil {
+			return errors.Wrap(err, "failed to remove base vhdx")
+		}
+	}
+	if _, err := os.Stat(diffVhdPath); err == nil {
+		if err := os.RemoveAll(diffVhdPath); err != nil {
+			return errors.Wrap(err, "failed to remove differencing vhdx")
+		}
+	}
+
+	// Just create the vhdx for the utilityVM layer, no need to format it.
+ createParams := &vhd.CreateVirtualDiskParameters{ + Version: 2, + Version2: vhd.CreateVersion2{ + MaximumSize: sizeInGB * 1024 * 1024 * 1024, + BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, + }, + } + handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) + if err != nil { + return errors.Wrap(err, "failed to create vhdx") + } + + defer func() { + if err != nil { + _ = syscall.CloseHandle(handle) + os.RemoveAll(baseVhdPath) + os.RemoveAll(diffVhdPath) + } + }() + + // If it is a UtilityVM layer then the base vhdx must be attached when calling + // `SetupBaseOSLayer` + attachParams := &vhd.AttachVirtualDiskParameters{ + Version: 2, + } + if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, attachParams); err != nil { + return errors.Wrapf(err, "failed to attach virtual disk") + } + + options := OsLayerOptions{ + Type: OsLayerTypeVM, + } + if err := SetupBaseOSLayer(ctx, uvmPath, windows.Handle(handle), options); err != nil { + return err + } + + // Detach and close the handle after setting up the layer as we don't need the handle + // for anything else and we no longer need to be attached either. + if err = vhd.DetachVirtualDisk(handle); err != nil { + return errors.Wrap(err, "failed to detach vhdx") + } + if err = syscall.CloseHandle(handle); err != nil { + return errors.Wrap(err, "failed to close vhdx handle") + } + + // Create the differencing disk that will be what's copied for the final rw layer + // for a container. + if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil { + return errors.Wrap(err, "failed to create differencing disk") + } + + if err := security.GrantVmGroupAccess(baseVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath) + } + if err := security.GrantVmGroupAccess(diffVhdPath); err != nil { + return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath) + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/import.go b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go new file mode 100644 index 00000000000..0c61dab3291 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go @@ -0,0 +1,41 @@ +package computestorage + +import ( + "context" + "encoding/json" + + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/pkg/errors" + "go.opencensus.io/trace" +) + +// ImportLayer imports a container layer. +// +// `layerPath` is a path to a directory to import the layer to. If the directory +// does not exist it will be automatically created. +// +// `sourceFolderpath` is a pre-existing folder that contains the layer to +// import. +// +// `layerData` is the parent layer data. 
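A short sketch of driving the scratch-space helper above; the layer directory, vhdx file names, and the 20 GB size are assumptions for illustration:

```go
package main

import (
	"context"
	"log"
	"path/filepath"

	"github.com/Microsoft/hcsshim/computestorage"
)

func main() {
	ctx := context.Background()
	layerPath := `C:\layers\base-layer` // hypothetical extracted base layer

	// The helper removes stale Hives/Layout files and any existing vhdx files
	// itself, so re-running with a new size is safe.
	baseVhd := filepath.Join(layerPath, "blank-base.vhdx")
	diffVhd := filepath.Join(layerPath, "blank.vhdx")
	if err := computestorage.SetupContainerBaseLayer(ctx, layerPath, baseVhd, diffVhd, 20); err != nil {
		log.Fatalf("scratch setup failed: %v", err)
	}
}
```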
+func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerData LayerData) (err error) {
+	title := "hcsshim.ImportLayer"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("layerPath", layerPath),
+		trace.StringAttribute("sourceFolderPath", sourceFolderPath),
+	)
+
+	bytes, err := json.Marshal(layerData)
+	if err != nil {
+		return err
+	}
+
+	err = hcsImportLayer(layerPath, sourceFolderPath, string(bytes))
+	if err != nil {
+		return errors.Wrap(err, "failed to import layer")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go
new file mode 100644
index 00000000000..53ed8ea6eda
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go
@@ -0,0 +1,38 @@
+package computestorage
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+)
+
+// InitializeWritableLayer initializes a writable layer for a container.
+//
+// `layerPath` is a path to the directory where the layer is mounted. If the
+// path does not end in a `\` the platform will append it automatically.
+//
+// `layerData` is the parent read-only layer data.
+func InitializeWritableLayer(ctx context.Context, layerPath string, layerData LayerData) (err error) {
+	title := "hcsshim.InitializeWritableLayer"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("layerPath", layerPath),
+	)
+
+	bytes, err := json.Marshal(layerData)
+	if err != nil {
+		return err
+	}
+
+	// Options are not used in the platform as of RS5
+	err = hcsInitializeWritableLayer(layerPath, string(bytes), "")
+	if err != nil {
+		return errors.Wrap(err, "failed to initialize container layer")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go
new file mode 100644
index 00000000000..fcdbbef8143
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go
@@ -0,0 +1,27 @@
+package computestorage
+
+import (
+	"context"
+
+	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+	"golang.org/x/sys/windows"
+)
+
+// GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer.
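GetLayerVhdMountPath needs a handle to a VHD that is already attached. A hedged sketch of the open, attach, and query sequence using the go-winio vhd helpers vendored earlier in this change; the vhdx path is an assumption:

```go
package main

import (
	"context"
	"log"
	"syscall"

	"github.com/Microsoft/go-winio/vhd"
	"github.com/Microsoft/hcsshim/computestorage"
	"golang.org/x/sys/windows"
)

func main() {
	ctx := context.Background()

	handle, err := vhd.OpenVirtualDisk(`C:\layers\scratch\sandbox.vhdx`, // hypothetical path
		vhd.VirtualDiskAccessNone, vhd.OpenVirtualDiskFlagNone)
	if err != nil {
		log.Fatalf("open failed: %v", err)
	}
	defer syscall.CloseHandle(handle)

	if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone,
		&vhd.AttachVirtualDiskParameters{Version: 2}); err != nil {
		log.Fatalf("attach failed: %v", err)
	}
	defer vhd.DetachVirtualDisk(handle)

	// Resolves the volume path backing the attached disk.
	path, err := computestorage.GetLayerVhdMountPath(ctx, windows.Handle(handle))
	if err != nil {
		log.Fatalf("mount path query failed: %v", err)
	}
	log.Println("volume path:", path)
}
```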
+func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) {
+	title := "hcsshim.GetLayerVhdMountPath"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+
+	var mountPath *uint16
+	err = hcsGetLayerVhdMountPath(vhdHandle, &mountPath)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to get vhd mount path")
+	}
+	path = interop.ConvertAndFreeCoTaskMemString(mountPath)
+	return path, nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go
new file mode 100644
index 00000000000..06aaf841e8f
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go
@@ -0,0 +1,74 @@
+package computestorage
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/Microsoft/hcsshim/osversion"
+	"github.com/pkg/errors"
+	"go.opencensus.io/trace"
+	"golang.org/x/sys/windows"
+)
+
+// SetupBaseOSLayer sets up a layer that contains a base OS for a container.
+//
+// `layerPath` is a path to a directory containing the layer.
+//
+// `vhdHandle` is an empty file handle if `options.Type == OsLayerTypeContainer`
+// or else it is a file handle to the 'SystemTemplateBase.vhdx' if `options.Type
+// == OsLayerTypeVm`.
+//
+// `options` are the options applied while processing the layer.
+func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.Handle, options OsLayerOptions) (err error) {
+	title := "hcsshim.SetupBaseOSLayer"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("layerPath", layerPath),
+	)
+
+	bytes, err := json.Marshal(options)
+	if err != nil {
+		return err
+	}
+
+	err = hcsSetupBaseOSLayer(layerPath, vhdHandle, string(bytes))
+	if err != nil {
+		return errors.Wrap(err, "failed to setup base OS layer")
+	}
+	return nil
+}
+
+// SetupBaseOSVolume sets up a volume that contains a base OS for a container.
+//
+// `layerPath` is a path to a directory containing the layer.
+//
+// `volumePath` is the path to the volume to be used for setup.
+//
+// `options` are the options applied while processing the layer.
+func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, options OsLayerOptions) (err error) {
+	if osversion.Build() < 19645 {
+		return errors.New("SetupBaseOSVolume is not present on builds older than 19645")
+	}
+	title := "hcsshim.SetupBaseOSVolume"
+	ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("layerPath", layerPath),
+		trace.StringAttribute("volumePath", volumePath),
+	)
+
+	bytes, err := json.Marshal(options)
+	if err != nil {
+		return err
+	}
+
+	err = hcsSetupBaseOSVolume(layerPath, volumePath, string(bytes))
+	if err != nil {
+		return errors.Wrap(err, "failed to setup base OS volume")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go
new file mode 100644
index 00000000000..95aff9c1848
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go
@@ -0,0 +1,50 @@
+// Package computestorage is a wrapper around the HCS storage APIs.
These are new storage APIs introduced +// separate from the original graphdriver calls intended to give more freedom around creating +// and managing container layers and scratch spaces. +package computestorage + +import ( + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go storage.go + +//sys hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) = computestorage.HcsImportLayer? +//sys hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) = computestorage.HcsExportLayer? +//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestoryLayer? +//sys hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) = computestorage.HcsSetupBaseOSLayer? +//sys hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) = computestorage.HcsInitializeWritableLayer? +//sys hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) = computestorage.HcsAttachLayerStorageFilter? +//sys hcsDetachLayerStorageFilter(layerPath string) (hr error) = computestorage.HcsDetachLayerStorageFilter? +//sys hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) = computestorage.HcsFormatWritableLayerVhd? +//sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath? +//sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume? + +// LayerData is the data used to describe parent layer information. +type LayerData struct { + SchemaVersion hcsschema.Version `json:"SchemaVersion,omitempty"` + Layers []hcsschema.Layer `json:"Layers,omitempty"` +} + +// ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall. +type ExportLayerOptions struct { + IsWritableLayer bool `json:"IsWritableLayer,omitempty"` +} + +// OsLayerType is the type of layer being operated on. +type OsLayerType string + +const ( + // OsLayerTypeContainer is a container layer. + OsLayerTypeContainer OsLayerType = "Container" + // OsLayerTypeVM is a virtual machine layer. + OsLayerTypeVM OsLayerType = "Vm" +) + +// OsLayerOptions are the set of options that are used with the `SetupBaseOSLayer` and +// `SetupBaseOSVolume` calls. +type OsLayerOptions struct { + Type OsLayerType `json:"Type,omitempty"` + DisableCiCacheOptimization bool `json:"DisableCiCacheOptimization,omitempty"` + SkipUpdateBcdForBoot bool `json:"SkipUpdateBcdForBoot,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go new file mode 100644 index 00000000000..4f951806743 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go @@ -0,0 +1,319 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package computestorage + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
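The `computestorage` wrappers above pass these option structs to the DLL as JSON strings. A small runnable illustration of the wire shape:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Microsoft/hcsshim/computestorage"
)

func main() {
	opts := computestorage.OsLayerOptions{
		Type: computestorage.OsLayerTypeContainer,
	}
	b, err := json.Marshal(opts)
	if err != nil {
		panic(err)
	}
	// The omitempty tags drop unset fields, so this prints {"Type":"Container"}.
	fmt.Println(string(b))
}
```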
+func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modcomputestorage = windows.NewLazySystemDLL("computestorage.dll") + + procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer") + procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") + procHcsDestoryLayer = modcomputestorage.NewProc("HcsDestoryLayer") + procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer") + procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer") + procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter") + procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter") + procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd") + procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath") + procHcsSetupBaseOSVolume = modcomputestorage.NewProc("HcsSetupBaseOSVolume") +) + +func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(sourceFolderPath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsImportLayer(_p0, _p1, _p2) +} + +func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) { + if hr = procHcsImportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(exportFolderPath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + var _p3 *uint16 + _p3, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsExportLayer(_p0, _p1, _p2, _p3) +} + +func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uint16, options *uint16) (hr error) { + if hr = procHcsExportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsDestroyLayer(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + return _hcsDestroyLayer(_p0) +} + +func _hcsDestroyLayer(layerPath *uint16) (hr error) { + if hr = procHcsDestoryLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDestoryLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) 
+ if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSetupBaseOSLayer(_p0, handle, _p1) +} + +func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) { + if hr = procHcsSetupBaseOSLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(writableLayerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsInitializeWritableLayer(_p0, _p1, _p2) +} + +func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, options *uint16) (hr error) { + if hr = procHcsInitializeWritableLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsAttachLayerStorageFilter(_p0, _p1) +} + +func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) { + if hr = procHcsAttachLayerStorageFilter.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsDetachLayerStorageFilter(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + return _hcsDetachLayerStorageFilter(_p0) +} + +func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { + if hr = procHcsDetachLayerStorageFilter.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) { + if hr = procHcsFormatWritableLayerVhd.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func 
hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) { + if hr = procHcsGetLayerVhdMountPath.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(volumePath) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSetupBaseOSVolume(_p0, _p1, _p2) +} + +func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint16) (hr error) { + if hr = procHcsSetupBaseOSVolume.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go new file mode 100644 index 00000000000..bfd722898e9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/container.go @@ -0,0 +1,223 @@ +package hcsshim + +import ( + "context" + "fmt" + "os" + "sync" + "time" + + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/hcs/schema1" + "github.com/Microsoft/hcsshim/internal/mergemaps" +) + +// ContainerProperties holds the properties for a container and the processes running in that container +type ContainerProperties = schema1.ContainerProperties + +// MemoryStats holds the memory statistics for a container +type MemoryStats = schema1.MemoryStats + +// ProcessorStats holds the processor statistics for a container +type ProcessorStats = schema1.ProcessorStats + +// StorageStats holds the storage statistics for a container +type StorageStats = schema1.StorageStats + +// NetworkStats holds the network statistics for a container +type NetworkStats = schema1.NetworkStats + +// Statistics is the structure returned by a statistics call on a container +type Statistics = schema1.Statistics + +// ProcessList is the structure of an item returned by a ProcessList call on a container +type ProcessListItem = schema1.ProcessListItem + +// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container +type MappedVirtualDiskController = schema1.MappedVirtualDiskController + +// Type of Request Support in ModifySystem +type RequestType = schema1.RequestType + +// Type of Resource Support in ModifySystem +type ResourceType = schema1.ResourceType + +// RequestType const +const ( + Add = schema1.Add + Remove = schema1.Remove + Network = schema1.Network +) + +// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system +// Supported resource types are Network and Request Types are Add/Remove +type ResourceModificationRequestResponse = schema1.ResourceModificationRequestResponse + +type container struct { + system *hcs.System + waitOnce sync.Once + waitErr error + waitCh chan struct{} +} + +// createComputeSystemAdditionalJSON is read from the 
environment at initialisation
+// time. It allows an environment variable to define additional JSON which
+// is merged in the CreateComputeSystem call to HCS.
+var createContainerAdditionalJSON []byte
+
+func init() {
+	createContainerAdditionalJSON = ([]byte)(os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON"))
+}
+
+// CreateContainer creates a new container with the given configuration but does not start it.
+func CreateContainer(id string, c *ContainerConfig) (Container, error) {
+	fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON)
+	if err != nil {
+		return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err)
+	}
+
+	system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig)
+	if err != nil {
+		return nil, err
+	}
+	return &container{system: system}, err
+}
+
+// OpenContainer opens an existing container by ID.
+func OpenContainer(id string) (Container, error) {
+	system, err := hcs.OpenComputeSystem(context.Background(), id)
+	if err != nil {
+		return nil, err
+	}
+	return &container{system: system}, err
+}
+
+// GetContainers gets a list of the containers on the system that match the query.
+func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) {
+	return hcs.GetComputeSystems(context.Background(), q)
+}
+
+// Start synchronously starts the container.
+func (container *container) Start() error {
+	return convertSystemError(container.system.Start(context.Background()), container)
+}
+
+// Shutdown requests a container shutdown, but it may not actually be shut down until Wait() succeeds.
+func (container *container) Shutdown() error {
+	err := container.system.Shutdown(context.Background())
+	if err != nil {
+		return convertSystemError(err, container)
+	}
+	return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Shutdown"}
+}
+
+// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds.
+func (container *container) Terminate() error {
+	err := container.system.Terminate(context.Background())
+	if err != nil {
+		return convertSystemError(err, container)
+	}
+	return &ContainerError{Container: container, Err: ErrVmcomputeOperationPending, Operation: "hcsshim::ComputeSystem::Terminate"}
+}
+
+// Wait synchronously waits for the container to shut down or terminate.
+func (container *container) Wait() error {
+	err := container.system.Wait()
+	if err == nil {
+		err = container.system.ExitError()
+	}
+	return convertSystemError(err, container)
+}
+
+// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It
+// returns an error wrapping ErrTimeout if the timeout occurs.
+func (container *container) WaitTimeout(timeout time.Duration) error {
+	container.waitOnce.Do(func() {
+		container.waitCh = make(chan struct{})
+		go func() {
+			container.waitErr = container.Wait()
+			close(container.waitCh)
+		}()
+	})
+	t := time.NewTimer(timeout)
+	defer t.Stop()
+	select {
+	case <-t.C:
+		return &ContainerError{Container: container, Err: ErrTimeout, Operation: "hcsshim::ComputeSystem::Wait"}
+	case <-container.waitCh:
+		return container.waitErr
+	}
+}
+
+// Pause pauses the execution of a container.
+func (container *container) Pause() error {
+	return convertSystemError(container.system.Pause(context.Background()), container)
+}
+
+// Resume resumes the execution of a container.
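WaitTimeout surfaces ErrTimeout wrapped in a ContainerError, so callers classify it with the package helpers rather than comparing errors directly. A hedged sketch; the container ID is a made-up example:

```go
package main

import (
	"log"
	"time"

	"github.com/Microsoft/hcsshim"
)

func main() {
	c, err := hcsshim.OpenContainer("example-id") // hypothetical existing container
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	if err := c.WaitTimeout(30 * time.Second); err != nil {
		if hcsshim.IsTimeout(err) {
			// Still running after 30s; keep waiting or call Terminate().
			log.Println("container did not exit in time")
			return
		}
		log.Fatal(err)
	}
}
```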
+func (container *container) Resume() error { + return convertSystemError(container.system.Resume(context.Background()), container) +} + +// HasPendingUpdates returns true if the container has updates pending to install +func (container *container) HasPendingUpdates() (bool, error) { + return false, nil +} + +// Statistics returns statistics for the container. This is a legacy v1 call +func (container *container) Statistics() (Statistics, error) { + properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeStatistics) + if err != nil { + return Statistics{}, convertSystemError(err, container) + } + + return properties.Statistics, nil +} + +// ProcessList returns an array of ProcessListItems for the container. This is a legacy v1 call +func (container *container) ProcessList() ([]ProcessListItem, error) { + properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeProcessList) + if err != nil { + return nil, convertSystemError(err, container) + } + + return properties.ProcessList, nil +} + +// This is a legacy v1 call +func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) { + properties, err := container.system.Properties(context.Background(), schema1.PropertyTypeMappedVirtualDisk) + if err != nil { + return nil, convertSystemError(err, container) + } + + return properties.MappedVirtualDiskControllers, nil +} + +// CreateProcess launches a new process within the container. +func (container *container) CreateProcess(c *ProcessConfig) (Process, error) { + p, err := container.system.CreateProcess(context.Background(), c) + if err != nil { + return nil, convertSystemError(err, container) + } + return &process{p: p.(*hcs.Process)}, nil +} + +// OpenProcess gets an interface to an existing process within the container. +func (container *container) OpenProcess(pid int) (Process, error) { + p, err := container.system.OpenProcess(context.Background(), pid) + if err != nil { + return nil, convertSystemError(err, container) + } + return &process{p: p}, nil +} + +// Close cleans up any state associated with the container but does not terminate or wait for it. 
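Putting the legacy v1 surface together: the lifecycle is roughly create, start, shutdown, wait, close. A minimal sketch; the ID and config fields are illustrative assumptions (real configs carry layer folders, network endpoints, and so on):

```go
package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	cfg := &hcsshim.ContainerConfig{
		SystemType: "Container", // hypothetical minimal config
		Name:       "example",
		Owner:      "docs-sketch",
	}

	c, err := hcsshim.CreateContainer("example-id", cfg)
	if err != nil {
		log.Fatalf("create failed: %v", err)
	}
	defer c.Close()

	if err := c.Start(); err != nil {
		log.Fatalf("start failed: %v", err)
	}

	// Shutdown completes asynchronously; IsPending distinguishes that from failure.
	if err := c.Shutdown(); err != nil && !hcsshim.IsPending(err) {
		log.Fatalf("shutdown failed: %v", err)
	}
	if err := c.Wait(); err != nil {
		log.Fatalf("wait failed: %v", err)
	}
}
```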
+func (container *container) Close() error {
+	return convertSystemError(container.system.Close(), container)
+}
+
+// Modify sends a modification request to the compute system.
+func (container *container) Modify(config *ResourceModificationRequestResponse) error {
+	return convertSystemError(container.system.Modify(context.Background(), config), container)
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go
new file mode 100644
index 00000000000..f367022e712
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/errors.go
@@ -0,0 +1,245 @@
+package hcsshim
+
+import (
+	"fmt"
+	"syscall"
+
+	"github.com/Microsoft/hcsshim/internal/hns"
+
+	"github.com/Microsoft/hcsshim/internal/hcs"
+	"github.com/Microsoft/hcsshim/internal/hcserror"
+)
+
+var (
+	// ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
+	ErrComputeSystemDoesNotExist = hcs.ErrComputeSystemDoesNotExist
+
+	// ErrElementNotFound is an error encountered when the object being referenced does not exist
+	ErrElementNotFound = hcs.ErrElementNotFound
+
+	// ErrNotSupported is an error encountered when the requested operation is not supported
+	ErrNotSupported = hcs.ErrNotSupported
+
+	// ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported
+	// decimal -2147024883 / hex 0x8007000d
+	ErrInvalidData = hcs.ErrInvalidData
+
+	// ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
+	ErrHandleClose = hcs.ErrHandleClose
+
+	// ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method
+	ErrAlreadyClosed = hcs.ErrAlreadyClosed
+
+	// ErrInvalidNotificationType is an error encountered when an invalid notification type is used
+	ErrInvalidNotificationType = hcs.ErrInvalidNotificationType
+
+	// ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
+	ErrInvalidProcessState = hcs.ErrInvalidProcessState
+
+	// ErrTimeout is an error encountered when waiting on a notification times out
+	ErrTimeout = hcs.ErrTimeout
+
+	// ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
+	// a different expected notification
+	ErrUnexpectedContainerExit = hcs.ErrUnexpectedContainerExit
+
+	// ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
+	// is lost while waiting for a notification
+	ErrUnexpectedProcessAbort = hcs.ErrUnexpectedProcessAbort
+
+	// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
+	ErrUnexpectedValue = hcs.ErrUnexpectedValue
+
+	// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
+	ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped
+
+	// ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
+	ErrVmcomputeOperationPending = hcs.ErrVmcomputeOperationPending
+
+	// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
+	ErrVmcomputeOperationInvalidState = hcs.ErrVmcomputeOperationInvalidState
+
+	// ErrProcNotFound is an error encountered when a procedure lookup fails.
+	ErrProcNotFound = hcs.ErrProcNotFound
+
+	// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
+	// builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3.
+	ErrVmcomputeOperationAccessIsDenied = hcs.ErrVmcomputeOperationAccessIsDenied
+
+	// ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
+	ErrVmcomputeInvalidJSON = hcs.ErrVmcomputeInvalidJSON
+
+	// ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
+	ErrVmcomputeUnknownMessage = hcs.ErrVmcomputeUnknownMessage
+
+	// ErrPlatformNotSupported is an error encountered when hcs doesn't support the request
+	ErrPlatformNotSupported = hcs.ErrPlatformNotSupported
+)
+
+type EndpointNotFoundError = hns.EndpointNotFoundError
+type NetworkNotFoundError = hns.NetworkNotFoundError
+
+// ProcessError is an error encountered in HCS during an operation on a Process object
+type ProcessError struct {
+	Process   *process
+	Operation string
+	Err       error
+	Events    []hcs.ErrorEvent
+}
+
+// ContainerError is an error encountered in HCS during an operation on a Container object
+type ContainerError struct {
+	Container *container
+	Operation string
+	Err       error
+	Events    []hcs.ErrorEvent
+}
+
+func (e *ContainerError) Error() string {
+	if e == nil {
+		return ""
+	}
+
+	if e.Container == nil {
+		return "unexpected nil container for error: " + e.Err.Error()
+	}
+
+	s := "container " + e.Container.system.ID()
+
+	if e.Operation != "" {
+		s += " encountered an error during " + e.Operation
+	}
+
+	switch e.Err.(type) {
+	case nil:
+		break
+	case syscall.Errno:
+		s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err))
+	default:
+		s += fmt.Sprintf(": %s", e.Err.Error())
+	}
+
+	for _, ev := range e.Events {
+		s += "\n" + ev.String()
+	}
+
+	return s
+}
+
+func (e *ProcessError) Error() string {
+	if e == nil {
+		return ""
+	}
+
+	if e.Process == nil {
+		return "unexpected nil process for error: " + e.Err.Error()
+	}
+
+	s := fmt.Sprintf("process %d in container %s", e.Process.p.Pid(), e.Process.p.SystemID())
+	if e.Operation != "" {
+		s += " encountered an error during " + e.Operation
+	}
+
+	switch e.Err.(type) {
+	case nil:
+		break
+	case syscall.Errno:
+		s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err))
+	default:
+		s += fmt.Sprintf(": %s", e.Err.Error())
+	}
+
+	for _, ev := range e.Events {
+		s += "\n" + ev.String()
+	}
+
+	return s
+}
+
+// IsNotExist checks if an error is caused by the Container or Process not existing.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound.
+func IsNotExist(err error) bool {
+	if _, ok := err.(EndpointNotFoundError); ok {
+		return true
+	}
+	if _, ok := err.(NetworkNotFoundError); ok {
+		return true
+	}
+	return hcs.IsNotExist(getInnerError(err))
+}
+
+// IsAlreadyClosed checks if an error is caused by the Container or Process having been
+// already closed by a call to the Close() method.
+func IsAlreadyClosed(err error) bool {
+	return hcs.IsAlreadyClosed(getInnerError(err))
+}
+
+// IsPending returns a boolean indicating whether the error is that
+// the requested operation is being completed in the background.
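+//
+// Illustrative use (sketch): Shutdown in this package deliberately returns a
+// pending error while the shutdown completes asynchronously, so callers
+// typically treat that case as non-fatal and then Wait:
+//
+//	if err := c.Shutdown(); err != nil && !hcsshim.IsPending(err) {
+//		return err
+//	}
+//	return c.Wait()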
+func IsPending(err error) bool {
+	return hcs.IsPending(getInnerError(err))
+}
+
+// IsTimeout returns a boolean indicating whether the error is caused by
+// a timeout waiting for the operation to complete.
+func IsTimeout(err error) bool {
+	return hcs.IsTimeout(getInnerError(err))
+}
+
+// IsAlreadyStopped returns a boolean indicating whether the error is caused by
+// a Container or Process being already stopped.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound.
+func IsAlreadyStopped(err error) bool {
+	return hcs.IsAlreadyStopped(getInnerError(err))
+}
+
+// IsNotSupported returns a boolean indicating whether the error is caused by
+// unsupported platform requests.
+// Note: Currently an unsupported platform request can mean that any of
+// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or
+// ErrVmcomputeUnknownMessage was returned by the platform.
+func IsNotSupported(err error) bool {
+	return hcs.IsNotSupported(getInnerError(err))
+}
+
+// IsOperationInvalidState returns true when err is caused by
+// `ErrVmcomputeOperationInvalidState`.
+func IsOperationInvalidState(err error) bool {
+	return hcs.IsOperationInvalidState(getInnerError(err))
+}
+
+// IsAccessIsDenied returns true when err is caused by
+// `ErrVmcomputeOperationAccessIsDenied`.
+func IsAccessIsDenied(err error) bool {
+	return hcs.IsAccessIsDenied(getInnerError(err))
+}
+
+func getInnerError(err error) error {
+	switch pe := err.(type) {
+	case nil:
+		return nil
+	case *ContainerError:
+		err = pe.Err
+	case *ProcessError:
+		err = pe.Err
+	}
+	return err
+}
+
+func convertSystemError(err error, c *container) error {
+	if serr, ok := err.(*hcs.SystemError); ok {
+		return &ContainerError{Container: c, Operation: serr.Op, Err: serr.Err, Events: serr.Events}
+	}
+	return err
+}
+
+func convertProcessError(err error, p *process) error {
+	if perr, ok := err.(*hcs.ProcessError); ok {
+		return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events}
+	}
+	return err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 b/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1
new file mode 100644
index 00000000000..ce6edbcf329
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1
@@ -0,0 +1,12 @@
+# Requirements so far:
+# dockerd running
+#  - image microsoft/nanoserver (matching host base image) docker load -i c:\baseimages\nanoserver.tar
+#  - image alpine (linux) docker pull --platform=linux alpine
+
+
+# TODO: Add this as a parameter for debugging, e.g. "functional-tests -debug=$true"
+#$env:HCSSHIM_FUNCTIONAL_TESTS_DEBUG="yes please"
+
+#pushd uvm
+go test -v -tags "functional uvmcreate uvmscratch uvmscsi uvmvpmem uvmvsmb uvmp9" ./...
+#popd
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/hcsshim/hcsshim.go b/vendor/github.com/Microsoft/hcsshim/hcsshim.go
new file mode 100644
index 00000000000..ceb3ac85ee4
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hcsshim.go
@@ -0,0 +1,28 @@
+// Shim for the Host Compute Service (HCS) to manage Windows Server
+// containers and Hyper-V containers.
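+//
+// A minimal, illustrative lifecycle sketch (the configuration and error
+// handling are placeholders, not part of this package):
+//
+//	c, err := hcsshim.CreateContainer("demo", &hcsshim.ContainerConfig{ /* ... */ })
+//	if err != nil {
+//		return err
+//	}
+//	defer c.Close()
+//	if err := c.Start(); err != nil {
+//		return err
+//	}
+//	if err := c.Shutdown(); err != nil && !hcsshim.IsPending(err) {
+//		return err
+//	}
+//	return c.Wait()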
+
+package hcsshim
+
+import (
+	"syscall"
+
+	"github.com/Microsoft/hcsshim/internal/hcserror"
+)
+
+//go:generate go run mksyscall_windows.go -output zsyscall_windows.go hcsshim.go
+
+//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
+
+const (
+	// Specific user-visible exit codes
+	WaitErrExecFailed = 32767
+
+	ERROR_GEN_FAILURE          = hcserror.ERROR_GEN_FAILURE
+	ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115)
+	WSAEINVAL                  = syscall.Errno(10022)
+
+	// Timeout on wait calls
+	TimeoutInfinite = 0xFFFFFFFF
+)
+
+type HcsError = hcserror.HcsError
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go
new file mode 100644
index 00000000000..9e0059447d9
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go
@@ -0,0 +1,118 @@
+package hcsshim
+
+import (
+	"github.com/Microsoft/hcsshim/internal/hns"
+)
+
+// HNSEndpoint represents a network endpoint in HNS
+type HNSEndpoint = hns.HNSEndpoint
+
+// HNSEndpointStats represents the stats for a network endpoint in HNS
+type HNSEndpointStats = hns.EndpointStats
+
+// Namespace represents a Compartment.
+type Namespace = hns.Namespace
+
+// SystemType represents the type of the system on which actions are done
+type SystemType string
+
+// SystemType const
+const (
+	ContainerType      SystemType = "Container"
+	VirtualMachineType SystemType = "VirtualMachine"
+	HostType           SystemType = "Host"
+)
+
+// EndpointAttachDetachRequest is the structure used to send requests to the container to modify the system.
+// Supported resource types are Network and the request types are Add/Remove
+type EndpointAttachDetachRequest = hns.EndpointAttachDetachRequest
+
+// EndpointResquestResponse is the object used to get the endpoint request response
+type EndpointResquestResponse = hns.EndpointResquestResponse
+
+// HNSEndpointRequest makes an HNS call to modify/query a network endpoint
+func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
+	return hns.HNSEndpointRequest(method, path, request)
+}
+
+// HNSListEndpointRequest makes an HNS call to query the list of available endpoints
+func HNSListEndpointRequest() ([]HNSEndpoint, error) {
+	return hns.HNSListEndpointRequest()
+}
+
+// HotAttachEndpoint makes an HCS call to attach the endpoint to the container
+func HotAttachEndpoint(containerID string, endpointID string) error {
+	endpoint, err := GetHNSEndpointByID(endpointID)
+	if err != nil {
+		return err
+	}
+	isAttached, err := endpoint.IsAttached(containerID)
+	if isAttached {
+		return err
+	}
+	return modifyNetworkEndpoint(containerID, endpointID, Add)
+}
+
+// HotDetachEndpoint makes an HCS call to detach the endpoint from the container
+func HotDetachEndpoint(containerID string, endpointID string) error {
+	endpoint, err := GetHNSEndpointByID(endpointID)
+	if err != nil {
+		return err
+	}
+	isAttached, err := endpoint.IsAttached(containerID)
+	if !isAttached {
+		return err
+	}
+	return modifyNetworkEndpoint(containerID, endpointID, Remove)
+}
+
+// modifyContainer modifies the container matching the given ID by sending the given request
+func modifyContainer(id string, request *ResourceModificationRequestResponse) error {
+	container, err := OpenContainer(id)
+	if err != nil {
+		if IsNotExist(err) {
+			return ErrComputeSystemDoesNotExist
+		}
+		return getInnerError(err)
+	}
+	defer container.Close()
+	err = container.Modify(request)
+	if err != nil {
+		if IsNotSupported(err) {
+			return ErrPlatformNotSupported
+		}
+		return getInnerError(err)
+	}
+
+	return nil
+}
+
+func modifyNetworkEndpoint(containerID string, endpointID string, request RequestType) error {
+	requestMessage := &ResourceModificationRequestResponse{
+		Resource: Network,
+		Request:  request,
+		Data:     endpointID,
+	}
+	err := modifyContainer(containerID, requestMessage)
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// GetHNSEndpointByID gets the endpoint by ID
+func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
+	return hns.GetHNSEndpointByID(endpointID)
+}
+
+// GetHNSEndpointByName gets the endpoint filtered by name
+func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
+	return hns.GetHNSEndpointByName(endpointName)
+}
+
+// GetHNSEndpointStats gets the stats for the named endpoint
+func GetHNSEndpointStats(endpointName string) (*HNSEndpointStats, error) {
+	return hns.GetHNSEndpointStats(endpointName)
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go
new file mode 100644
index 00000000000..2b538190476
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go
@@ -0,0 +1,16 @@
+package hcsshim
+
+import (
+	"github.com/Microsoft/hcsshim/internal/hns"
+)
+
+type HNSGlobals = hns.HNSGlobals
+type HNSVersion = hns.HNSVersion
+
+var (
+	HNSVersion1803 = hns.HNSVersion1803
+)
+
+func GetHNSGlobals() (*HNSGlobals, error) {
+	return hns.GetHNSGlobals()
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go
new file mode 100644
index 00000000000..f775fa1d07c
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go
@@ -0,0 +1,36 @@
+package hcsshim
+
+import (
+	"github.com/Microsoft/hcsshim/internal/hns"
+)
+
+// Subnet is associated with a network and represents a list
+// of subnets available to the network
+type Subnet = hns.Subnet
+
+// MacPool is associated with a network and represents a list
+// of MAC addresses available to the network
+type MacPool = hns.MacPool
+
+// HNSNetwork represents a network in HNS
+type HNSNetwork = hns.HNSNetwork
+
+// HNSNetworkRequest makes a call into HNS to update/query a single network
+func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) {
+	return hns.HNSNetworkRequest(method, path, request)
+}
+
+// HNSListNetworkRequest makes an HNS call to query the list of available networks
+func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) {
+	return hns.HNSListNetworkRequest(method, path, request)
+}
+
+// GetHNSNetworkByID gets the network filtered by ID
+func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) {
+	return hns.GetHNSNetworkByID(networkID)
+}
+
+// GetHNSNetworkByName gets the network filtered by name
+func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) {
+	return hns.GetHNSNetworkByName(networkName)
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go
new file mode 100644
index 00000000000..00ab2636449
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go
@@ -0,0 +1,60 @@
+package hcsshim
+
+import (
+	"github.com/Microsoft/hcsshim/internal/hns"
+)
+
+// PolicyType is the type of policy supported in ModifySystem requests
+type PolicyType = hns.PolicyType
+
+// PolicyType const
+const (
+	Nat                  = hns.Nat
+	ACL                  = hns.ACL
+	PA                   = hns.PA
+	VLAN                 = hns.VLAN
+	VSID                 = hns.VSID
+	VNet                 = hns.VNet
+	L2Driver             = hns.L2Driver
+	Isolation            = hns.Isolation
+	QOS                  = hns.QOS
+	OutboundNat          = hns.OutboundNat
+	ExternalLoadBalancer = hns.ExternalLoadBalancer
+	Route                = hns.Route
+	Proxy                = hns.Proxy
+)
+
+type ProxyPolicy = hns.ProxyPolicy
+
+type NatPolicy = hns.NatPolicy
+
+type QosPolicy = hns.QosPolicy
+
+type IsolationPolicy = hns.IsolationPolicy
+
+type VlanPolicy = hns.VlanPolicy
+
+type VsidPolicy = hns.VsidPolicy
+
+type PaPolicy = hns.PaPolicy
+
+type OutboundNatPolicy = hns.OutboundNatPolicy
+
+type ActionType = hns.ActionType
+type DirectionType = hns.DirectionType
+type RuleType = hns.RuleType
+
+const (
+	Allow = hns.Allow
+	Block = hns.Block
+
+	In  = hns.In
+	Out = hns.Out
+
+	Host   = hns.Host
+	Switch = hns.Switch
+)
+
+type ACLPolicy = hns.ACLPolicy
+
+type Policy = hns.Policy
diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go
new file mode 100644
index 00000000000..55aaa4a50ef
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go
@@ -0,0 +1,47 @@
+package hcsshim
+
+import (
+	"github.com/Microsoft/hcsshim/internal/hns"
+)
+
+// RoutePolicy is a structure defining the schema for a route-based policy
+type RoutePolicy = hns.RoutePolicy
+
+// ELBPolicy is a structure defining the schema for an ELB load-balancing policy
+type ELBPolicy = hns.ELBPolicy
+
+// LBPolicy is a structure defining the schema for a load-balancing policy
+type LBPolicy = hns.LBPolicy
+
+// PolicyList is a structure defining the schema for a policy list request
+type PolicyList = hns.PolicyList
+
+// HNSPolicyListRequest makes a call into HNS to update/query a single policy list
+func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) {
+	return hns.HNSPolicyListRequest(method, path, request)
+}
+
+// HNSListPolicyListRequest gets all the policy lists
+func HNSListPolicyListRequest() ([]PolicyList, error) {
+	return hns.HNSListPolicyListRequest()
+}
+
+// PolicyListRequest makes an HNS call to modify/query a network policy list
+func PolicyListRequest(method, path, request string) (*PolicyList, error) {
+	return hns.PolicyListRequest(method, path, request)
+}
+
+// GetPolicyListByID gets the policy list by ID
+func GetPolicyListByID(policyListID string) (*PolicyList, error) {
+	return hns.GetPolicyListByID(policyListID)
+}
+
+// AddLoadBalancer adds a load balancer policy list for the specified endpoints
+func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {
+	return hns.AddLoadBalancer(endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort)
+}
+
+// AddRoute adds a route policy list for the specified endpoints
+func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) {
+	return hns.AddRoute(endpoints, destinationPrefix, nextHop, encapEnabled)
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/hnssupport.go
new file mode 100644
index 00000000000..69405244b67
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/hnssupport.go
@@ -0,0 +1,13 @@
+package hcsshim
+
+import (
+	"github.com/Microsoft/hcsshim/internal/hns"
+)
+
+type HNSSupportedFeatures = hns.HNSSupportedFeatures
+
+type HNSAclFeatures = hns.HNSAclFeatures
+
+func GetHNSSupportedFeatures() HNSSupportedFeatures {
+	return hns.GetHNSSupportedFeatures()
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/interface.go b/vendor/github.com/Microsoft/hcsshim/interface.go
new file mode 100644
index 00000000000..300eb599668
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/interface.go
@@ -0,0 +1,114 @@
+package hcsshim
+
+import (
+	"io"
+	"time"
+
+	"github.com/Microsoft/hcsshim/internal/hcs/schema1"
+)
+
+// ProcessConfig is used as both the input of Container.CreateProcess
+// and to convert the parameters to JSON for passing onto the HCS
+type ProcessConfig = schema1.ProcessConfig
+
+type Layer = schema1.Layer
+type MappedDir = schema1.MappedDir
+type MappedPipe = schema1.MappedPipe
+type HvRuntime = schema1.HvRuntime
+type MappedVirtualDisk = schema1.MappedVirtualDisk
+
+// AssignedDevice represents a device that has been directly assigned to a container
+//
+// NOTE: Support added in RS5
+type AssignedDevice = schema1.AssignedDevice
+
+// ContainerConfig is used as both the input of CreateContainer
+// and to convert the parameters to JSON for passing onto the HCS
+type ContainerConfig = schema1.ContainerConfig
+
+type ComputeSystemQuery = schema1.ComputeSystemQuery
+
+// Container represents a created (but not necessarily running) container.
+type Container interface {
+	// Start synchronously starts the container.
+	Start() error
+
+	// Shutdown requests a container shutdown, but it may not actually be shut down until Wait() succeeds.
+	Shutdown() error
+
+	// Terminate requests that the container terminate, but it may not actually be terminated until Wait() succeeds.
+	Terminate() error
+
+	// Wait synchronously waits for the container to shut down or terminate.
+	Wait() error
+
+	// WaitTimeout synchronously waits for the container to terminate or the duration to elapse.
+	// It returns an error wrapping ErrTimeout if the timeout elapses first.
+	WaitTimeout(time.Duration) error
+
+	// Pause pauses the execution of a container.
+	Pause() error
+
+	// Resume resumes the execution of a container.
+	Resume() error
+
+	// HasPendingUpdates returns true if the container has updates pending to install.
+	HasPendingUpdates() (bool, error)
+
+	// Statistics returns statistics for a container.
+	Statistics() (Statistics, error)
+
+	// ProcessList returns details for the processes in a container.
+	ProcessList() ([]ProcessListItem, error)
+
+	// MappedVirtualDisks returns virtual disks mapped to a utility VM, indexed by controller.
+	MappedVirtualDisks() (map[int]MappedVirtualDiskController, error)
+
+	// CreateProcess launches a new process within the container.
+	CreateProcess(c *ProcessConfig) (Process, error)
+
+	// OpenProcess gets an interface to an existing process within the container.
+	OpenProcess(pid int) (Process, error)
+
+	// Close cleans up any state associated with the container but does not terminate or wait for it.
+	Close() error
+
+	// Modify sends a modification request to the compute system.
+	Modify(config *ResourceModificationRequestResponse) error
+}
+
+// Process represents a running or exited process.
+type Process interface {
+	// Pid returns the process ID of the process within the container.
+	Pid() int
+
+	// Kill signals the process to terminate but does not wait for it to finish terminating.
+	Kill() error
+
+	// Wait waits for the process to exit.
+	Wait() error
+
+	// WaitTimeout waits for the process to exit or the duration to elapse.
+	// It returns an error wrapping ErrTimeout if the timeout elapses first.
+	WaitTimeout(time.Duration) error
+
+	// ExitCode returns the exit code of the process. The process must have
+	// already terminated.
+	ExitCode() (int, error)
+
+	// ResizeConsole resizes the console of the process.
+	ResizeConsole(width, height uint16) error
+
+	// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
+	// these pipes does not close the underlying pipes; it should be possible to
+	// call this multiple times to get multiple interfaces.
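+	//
+	// Illustrative sketch of wiring a process's stdio to the host (error
+	// handling elided; "input" is an io.Reader supplied by the caller):
+	//
+	//	stdin, stdout, stderr, _ := p.Stdio()
+	//	go io.Copy(stdin, input)
+	//	go io.Copy(os.Stdout, stdout)
+	//	go io.Copy(os.Stderr, stderr)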
+	Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error)
+
+	// CloseStdin closes the write side of the stdin pipe so that the process is
+	// notified on the read side that there is no more data in stdin.
+	CloseStdin() error
+
+	// Close cleans up any state associated with the process but does not kill
+	// or wait on it.
+	Close() error
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go
new file mode 100644
index 00000000000..27a62a72386
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go
@@ -0,0 +1,91 @@
+package cow
+
+import (
+	"context"
+	"io"
+
+	"github.com/Microsoft/hcsshim/internal/hcs/schema1"
+	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
+)
+
+// Process is the interface for an OS process running in a container or utility VM.
+type Process interface {
+	// Close releases resources associated with the process and closes the
+	// writer and readers returned by Stdio. Depending on the implementation,
+	// this may also terminate the process.
+	Close() error
+	// CloseStdin causes the process's stdin handle to receive EOF/EPIPE/whatever
+	// is appropriate to indicate that no more data is available.
+	CloseStdin(ctx context.Context) error
+	// CloseStdout closes the stdout connection to the process. It is used to indicate
+	// that we are done receiving output on the shim side.
+	CloseStdout(ctx context.Context) error
+	// CloseStderr closes the stderr connection to the process. It is used to indicate
+	// that we are done receiving output on the shim side.
+	CloseStderr(ctx context.Context) error
+	// Pid returns the process ID.
+	Pid() int
+	// Stdio returns the stdio streams for a process. These may be nil if a stream
+	// was not requested during CreateProcess.
+	Stdio() (_ io.Writer, _ io.Reader, _ io.Reader)
+	// ResizeConsole resizes the virtual terminal associated with the process.
+	ResizeConsole(ctx context.Context, width, height uint16) error
+	// Kill sends a SIGKILL or equivalent signal to the process and returns whether
+	// the signal was delivered. It does not wait for the process to terminate.
+	Kill(ctx context.Context) (bool, error)
+	// Signal sends a signal to the process and returns whether the signal was
+	// delivered. The input is OS specific (either
+	// guestrequest.SignalProcessOptionsWCOW or
+	// guestrequest.SignalProcessOptionsLCOW). It does not wait for the process
+	// to terminate.
+	Signal(ctx context.Context, options interface{}) (bool, error)
+	// Wait waits for the process to complete, or for a connection to the process to be
+	// terminated by some error condition (including calling Close).
+	Wait() error
+	// ExitCode returns the exit code of the process. Returns an error if the process is
+	// not running.
+	ExitCode() (int, error)
+}
+
+// ProcessHost is the interface for creating processes.
+type ProcessHost interface {
+	// CreateProcess creates a process. The configuration is host specific
+	// (either hcsschema.ProcessParameters or lcow.ProcessParameters).
+	CreateProcess(ctx context.Context, config interface{}) (Process, error)
+	// OS returns the host's operating system, "linux" or "windows".
+	OS() string
+	// IsOCI specifies whether this is an OCI-compliant process host. If true,
+	// then the configuration passed to CreateProcess should have an OCI process
+	// spec (or nil if this is the initial process in an OCI container).
+	// Otherwise, it should have the HCS-specific process parameters.
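+	//
+	// Sketch of a caller choosing the configuration shape accordingly
+	// (the spec and parameter values are illustrative only):
+	//
+	//	var config interface{}
+	//	if host.IsOCI() {
+	//		config = ociProcessSpec
+	//	} else {
+	//		config = hcsProcessParams
+	//	}
+	//	p, err := host.CreateProcess(ctx, config)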
+	IsOCI() bool
+}
+
+// Container is the interface for container objects, either running on the host or
+// in a utility VM.
+type Container interface {
+	ProcessHost
+	// Close releases the resources associated with the container. Depending on
+	// the implementation, this may also terminate the container.
+	Close() error
+	// ID returns the container ID.
+	ID() string
+	// Properties returns the requested container properties targeting a V1 schema container.
+	Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error)
+	// PropertiesV2 returns the requested container properties targeting a V2 schema container.
+	PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error)
+	// Start starts a container.
+	Start(ctx context.Context) error
+	// Shutdown sends a shutdown request to the container (but does not wait for
+	// the shutdown to complete).
+	Shutdown(ctx context.Context) error
+	// Terminate sends a terminate request to the container (but does not wait
+	// for the terminate to complete).
+	Terminate(ctx context.Context) error
+	// Wait waits for the container to terminate, or for the connection to the
+	// container to be terminated by some error condition (including calling
+	// Close).
+	Wait() error
+	// Modify sends a request to modify container resources.
+	Modify(ctx context.Context, config interface{}) error
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go
new file mode 100644
index 00000000000..d13772b0301
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go
@@ -0,0 +1,161 @@
+package hcs
+
+import (
+	"fmt"
+	"sync"
+	"syscall"
+
+	"github.com/Microsoft/hcsshim/internal/interop"
+	"github.com/Microsoft/hcsshim/internal/logfields"
+	"github.com/Microsoft/hcsshim/internal/vmcompute"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	nextCallback    uintptr
+	callbackMap     = map[uintptr]*notificationWatcherContext{}
+	callbackMapLock = sync.RWMutex{}
+
+	notificationWatcherCallback = syscall.NewCallback(notificationWatcher)
+
+	// Notifications for HCS_SYSTEM handles
+	hcsNotificationSystemExited                      hcsNotification = 0x00000001
+	hcsNotificationSystemCreateCompleted             hcsNotification = 0x00000002
+	hcsNotificationSystemStartCompleted              hcsNotification = 0x00000003
+	hcsNotificationSystemPauseCompleted              hcsNotification = 0x00000004
+	hcsNotificationSystemResumeCompleted             hcsNotification = 0x00000005
+	hcsNotificationSystemCrashReport                 hcsNotification = 0x00000006
+	hcsNotificationSystemSiloJobCreated              hcsNotification = 0x00000007
+	hcsNotificationSystemSaveCompleted               hcsNotification = 0x00000008
+	hcsNotificationSystemRdpEnhancedModeStateChanged hcsNotification = 0x00000009
+	hcsNotificationSystemShutdownFailed              hcsNotification = 0x0000000A
+	hcsNotificationSystemGetPropertiesCompleted      hcsNotification = 0x0000000B
+	hcsNotificationSystemModifyCompleted             hcsNotification = 0x0000000C
+	hcsNotificationSystemCrashInitiated              hcsNotification = 0x0000000D
+	hcsNotificationSystemGuestConnectionClosed       hcsNotification = 0x0000000E
+
+	// Notifications for HCS_PROCESS handles
+	hcsNotificationProcessExited hcsNotification = 0x00010000
+
+	// Common notifications
+	hcsNotificationInvalid           hcsNotification = 0x00000000
+	hcsNotificationServiceDisconnect hcsNotification = 0x01000000
+)
+
+type hcsNotification uint32
+
+func (hn hcsNotification) String() string {
+	switch hn {
+	case hcsNotificationSystemExited:
"SystemExited" + case hcsNotificationSystemCreateCompleted: + return "SystemCreateCompleted" + case hcsNotificationSystemStartCompleted: + return "SystemStartCompleted" + case hcsNotificationSystemPauseCompleted: + return "SystemPauseCompleted" + case hcsNotificationSystemResumeCompleted: + return "SystemResumeCompleted" + case hcsNotificationSystemCrashReport: + return "SystemCrashReport" + case hcsNotificationSystemSiloJobCreated: + return "SystemSiloJobCreated" + case hcsNotificationSystemSaveCompleted: + return "SystemSaveCompleted" + case hcsNotificationSystemRdpEnhancedModeStateChanged: + return "SystemRdpEnhancedModeStateChanged" + case hcsNotificationSystemShutdownFailed: + return "SystemShutdownFailed" + case hcsNotificationSystemGetPropertiesCompleted: + return "SystemGetPropertiesCompleted" + case hcsNotificationSystemModifyCompleted: + return "SystemModifyCompleted" + case hcsNotificationSystemCrashInitiated: + return "SystemCrashInitiated" + case hcsNotificationSystemGuestConnectionClosed: + return "SystemGuestConnectionClosed" + case hcsNotificationProcessExited: + return "ProcessExited" + case hcsNotificationInvalid: + return "Invalid" + case hcsNotificationServiceDisconnect: + return "ServiceDisconnect" + default: + return fmt.Sprintf("Unknown: %d", hn) + } +} + +type notificationChannel chan error + +type notificationWatcherContext struct { + channels notificationChannels + handle vmcompute.HcsCallback + + systemID string + processID int +} + +type notificationChannels map[hcsNotification]notificationChannel + +func newSystemChannels() notificationChannels { + channels := make(notificationChannels) + for _, notif := range []hcsNotification{ + hcsNotificationServiceDisconnect, + hcsNotificationSystemExited, + hcsNotificationSystemCreateCompleted, + hcsNotificationSystemStartCompleted, + hcsNotificationSystemPauseCompleted, + hcsNotificationSystemResumeCompleted, + hcsNotificationSystemSaveCompleted, + } { + channels[notif] = make(notificationChannel, 1) + } + return channels +} + +func newProcessChannels() notificationChannels { + channels := make(notificationChannels) + for _, notif := range []hcsNotification{ + hcsNotificationServiceDisconnect, + hcsNotificationProcessExited, + } { + channels[notif] = make(notificationChannel, 1) + } + return channels +} + +func closeChannels(channels notificationChannels) { + for _, c := range channels { + close(c) + } +} + +func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr { + var result error + if int32(notificationStatus) < 0 { + result = interop.Win32FromHresult(notificationStatus) + } + + callbackMapLock.RLock() + context := callbackMap[callbackNumber] + callbackMapLock.RUnlock() + + if context == nil { + return 0 + } + + log := logrus.WithFields(logrus.Fields{ + "notification-type": notificationType.String(), + "system-id": context.systemID, + }) + if context.processID != 0 { + log.Data[logfields.ProcessID] = context.processID + } + log.Debug("HCS notification") + + if channel, ok := context.channels[notificationType]; ok { + channel <- result + } + + return 0 +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go new file mode 100644 index 00000000000..e21354ffd66 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go @@ -0,0 +1,343 @@ +package hcs + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "syscall" + + 
"github.com/Microsoft/hcsshim/internal/log" +) + +var ( + // ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists + ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e) + + // ErrElementNotFound is an error encountered when the object being referenced does not exist + ErrElementNotFound = syscall.Errno(0x490) + + // ErrElementNotFound is an error encountered when the object being referenced does not exist + ErrNotSupported = syscall.Errno(0x32) + + // ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported + // decimal -2147024883 / hex 0x8007000d + ErrInvalidData = syscall.Errno(0xd) + + // ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed + ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed") + + // ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method + ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed") + + // ErrInvalidNotificationType is an error encountered when an invalid notification type is used + ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type") + + // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation + ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation") + + // ErrTimeout is an error encountered when waiting on a notification times out + ErrTimeout = errors.New("hcsshim: timeout waiting for notification") + + // ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for + // a different expected notification + ErrUnexpectedContainerExit = errors.New("unexpected container exit") + + // ErrUnexpectedProcessAbort is the error encountered when communication with the compute service + // is lost while waiting for a notification + ErrUnexpectedProcessAbort = errors.New("lost communication with compute service") + + // ErrUnexpectedValue is an error encountered when hcs returns an invalid value + ErrUnexpectedValue = errors.New("unexpected value returned from hcs") + + // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container + ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110) + + // ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously + ErrVmcomputeOperationPending = syscall.Errno(0xC0370103) + + // ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation + ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105) + + // ErrProcNotFound is an error encountered when a procedure look up fails. + ErrProcNotFound = syscall.Errno(0x7f) + + // ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2 + // builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3. 
+	ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5)
+
+	// ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
+	ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d)
+
+	// ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
+	ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b)
+
+	// ErrVmcomputeUnexpectedExit is an error encountered when the compute system terminates unexpectedly
+	ErrVmcomputeUnexpectedExit = syscall.Errno(0xC0370106)
+
+	// ErrPlatformNotSupported is an error encountered when hcs doesn't support the request
+	ErrPlatformNotSupported = errors.New("unsupported platform request")
+
+	// ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped.
+	ErrProcessAlreadyStopped = syscall.Errno(0x8037011f)
+
+	// ErrInvalidHandle is an error that can be encountered when querying the properties of a compute system when the handle to that
+	// compute system has already been closed.
+	ErrInvalidHandle = syscall.Errno(0x6)
+)
+
+type ErrorEvent struct {
+	Message    string `json:"Message,omitempty"`    // Fully formatted error message
+	StackTrace string `json:"StackTrace,omitempty"` // Stack trace in string form
+	Provider   string `json:"Provider,omitempty"`
+	EventID    uint16 `json:"EventId,omitempty"`
+	Flags      uint32 `json:"Flags,omitempty"`
+	Source     string `json:"Source,omitempty"`
+	//Data []EventData `json:"Data,omitempty"` // Omit this as HCS doesn't encode this well. It's more confusing to include. It is however logged in debug mode (see processHcsResult function)
+}
+
+type hcsResult struct {
+	Error        int32
+	ErrorMessage string
+	ErrorEvents  []ErrorEvent `json:"ErrorEvents,omitempty"`
+}
+
+func (ev *ErrorEvent) String() string {
+	evs := "[Event Detail: " + ev.Message
+	if ev.StackTrace != "" {
+		evs += " Stack Trace: " + ev.StackTrace
+	}
+	if ev.Provider != "" {
+		evs += " Provider: " + ev.Provider
+	}
+	if ev.EventID != 0 {
+		evs = fmt.Sprintf("%s EventID: %d", evs, ev.EventID)
+	}
+	if ev.Flags != 0 {
+		evs = fmt.Sprintf("%s flags: %d", evs, ev.Flags)
+	}
+	if ev.Source != "" {
+		evs += " Source: " + ev.Source
+	}
+	evs += "]"
+	return evs
+}
+
+func processHcsResult(ctx context.Context, resultJSON string) []ErrorEvent {
+	if resultJSON != "" {
+		result := &hcsResult{}
+		if err := json.Unmarshal([]byte(resultJSON), result); err != nil {
+			log.G(ctx).WithError(err).Warning("Could not unmarshal HCS result")
+			return nil
+		}
+		return result.ErrorEvents
+	}
+	return nil
+}
+
+type HcsError struct {
+	Op     string
+	Err    error
+	Events []ErrorEvent
+}
+
+var _ net.Error = &HcsError{}
+
+func (e *HcsError) Error() string {
+	s := e.Op + ": " + e.Err.Error()
+	for _, ev := range e.Events {
+		s += "\n" + ev.String()
+	}
+	return s
+}
+
+func (e *HcsError) Temporary() bool {
+	err, ok := e.Err.(net.Error)
+	return ok && err.Temporary()
+}
+
+func (e *HcsError) Timeout() bool {
+	err, ok := e.Err.(net.Error)
+	return ok && err.Timeout()
+}
+
+// ProcessError is an error encountered in HCS during an operation on a Process object
+type ProcessError struct {
+	SystemID string
+	Pid      int
+	Op       string
+	Err      error
+	Events   []ErrorEvent
+}
+
+var _ net.Error = &ProcessError{}
+
+// SystemError is an error encountered in HCS during an operation on a Container object
+type SystemError struct {
+	ID     string
+	Op     string
+	Err    error
+	Events []ErrorEvent
+}
+
+var _ net.Error = &SystemError{}
+
+func (e *SystemError) Error() string {
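+	// The rendered message has the shape "<op> <id>: <err>", with any HCS
+	// error events appended on their own lines.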
+	s := e.Op + " " + e.ID + ": " + e.Err.Error()
+	for _, ev := range e.Events {
+		s += "\n" + ev.String()
+	}
+	return s
+}
+
+func (e *SystemError) Temporary() bool {
+	err, ok := e.Err.(net.Error)
+	return ok && err.Temporary()
+}
+
+func (e *SystemError) Timeout() bool {
+	err, ok := e.Err.(net.Error)
+	return ok && err.Timeout()
+}
+
+func makeSystemError(system *System, op string, err error, events []ErrorEvent) error {
+	// Don't double wrap errors
+	if _, ok := err.(*SystemError); ok {
+		return err
+	}
+	return &SystemError{
+		ID:     system.ID(),
+		Op:     op,
+		Err:    err,
+		Events: events,
+	}
+}
+
+func (e *ProcessError) Error() string {
+	s := fmt.Sprintf("%s %s:%d: %s", e.Op, e.SystemID, e.Pid, e.Err.Error())
+	for _, ev := range e.Events {
+		s += "\n" + ev.String()
+	}
+	return s
+}
+
+func (e *ProcessError) Temporary() bool {
+	err, ok := e.Err.(net.Error)
+	return ok && err.Temporary()
+}
+
+func (e *ProcessError) Timeout() bool {
+	err, ok := e.Err.(net.Error)
+	return ok && err.Timeout()
+}
+
+func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error {
+	// Don't double wrap errors
+	if _, ok := err.(*ProcessError); ok {
+		return err
+	}
+	return &ProcessError{
+		Pid:      process.Pid(),
+		SystemID: process.SystemID(),
+		Op:       op,
+		Err:      err,
+		Events:   events,
+	}
+}
+
+// IsNotExist checks if an error is caused by the Container or Process not existing.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound.
+func IsNotExist(err error) bool {
+	err = getInnerError(err)
+	return err == ErrComputeSystemDoesNotExist ||
+		err == ErrElementNotFound
+}
+
+// IsErrorInvalidHandle checks whether the error is the result of an operation carried
+// out on a handle that is invalid/closed. This error popped up while trying to query
+// stats on a container in the process of being stopped.
+func IsErrorInvalidHandle(err error) bool {
+	err = getInnerError(err)
+	return err == ErrInvalidHandle
+}
+
+// IsAlreadyClosed checks if an error is caused by the Container or Process having been
+// already closed by a call to the Close() method.
+func IsAlreadyClosed(err error) bool {
+	err = getInnerError(err)
+	return err == ErrAlreadyClosed
+}
+
+// IsPending returns a boolean indicating whether the error is that
+// the requested operation is being completed in the background.
+func IsPending(err error) bool {
+	err = getInnerError(err)
+	return err == ErrVmcomputeOperationPending
+}
+
+// IsTimeout returns a boolean indicating whether the error is caused by
+// a timeout waiting for the operation to complete.
+func IsTimeout(err error) bool {
+	if err, ok := err.(net.Error); ok && err.Timeout() {
+		return true
+	}
+	err = getInnerError(err)
+	return err == ErrTimeout
+}
+
+// IsAlreadyStopped returns a boolean indicating whether the error is caused by
+// a Container or Process being already stopped.
+// Note: Currently, ErrElementNotFound can mean that a Process has either
+// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
+// will currently return true when the error is ErrElementNotFound.
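+//
+// Illustrative use (sketch): tear-down paths often treat "already stopped"
+// as success:
+//
+//	if err := system.Terminate(ctx); err != nil && !IsAlreadyStopped(err) {
+//		return err
+//	}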
+func IsAlreadyStopped(err error) bool {
+	err = getInnerError(err)
+	return err == ErrVmcomputeAlreadyStopped ||
+		err == ErrProcessAlreadyStopped ||
+		err == ErrElementNotFound
+}
+
+// IsNotSupported returns a boolean indicating whether the error is caused by
+// unsupported platform requests.
+// Note: Currently an unsupported platform request can mean that any of
+// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or
+// ErrVmcomputeUnknownMessage was returned by the platform.
+func IsNotSupported(err error) bool {
+	err = getInnerError(err)
+	// If the platform doesn't recognize or support the request sent, below errors are seen
+	return err == ErrVmcomputeInvalidJSON ||
+		err == ErrInvalidData ||
+		err == ErrNotSupported ||
+		err == ErrVmcomputeUnknownMessage
+}
+
+// IsOperationInvalidState returns true when err is caused by
+// `ErrVmcomputeOperationInvalidState`.
+func IsOperationInvalidState(err error) bool {
+	err = getInnerError(err)
+	return err == ErrVmcomputeOperationInvalidState
+}
+
+// IsAccessIsDenied returns true when err is caused by
+// `ErrVmcomputeOperationAccessIsDenied`.
+func IsAccessIsDenied(err error) bool {
+	err = getInnerError(err)
+	return err == ErrVmcomputeOperationAccessIsDenied
+}
+
+func getInnerError(err error) error {
+	switch pe := err.(type) {
+	case nil:
+		return nil
+	case *HcsError:
+		err = pe.Err
+	case *SystemError:
+		err = pe.Err
+	case *ProcessError:
+		err = pe.Err
+	}
+	return err
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
new file mode 100644
index 00000000000..f4605922ab4
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
@@ -0,0 +1,557 @@
+package hcs
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"io"
+	"os"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Microsoft/hcsshim/internal/log"
+	"github.com/Microsoft/hcsshim/internal/oc"
+	"github.com/Microsoft/hcsshim/internal/vmcompute"
+	"go.opencensus.io/trace"
+)
+
+// Process represents a process running in an HCS compute system.
+type Process struct {
+	handleLock          sync.RWMutex
+	handle              vmcompute.HcsProcess
+	processID           int
+	system              *System
+	hasCachedStdio      bool
+	stdioLock           sync.Mutex
+	stdin               io.WriteCloser
+	stdout              io.ReadCloser
+	stderr              io.ReadCloser
+	callbackNumber      uintptr
+	killSignalDelivered bool
+
+	closedWaitOnce sync.Once
+	waitBlock      chan struct{}
+	exitCode       int
+	waitError      error
+}
+
+func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *System) *Process {
+	return &Process{
+		handle:    process,
+		processID: processID,
+		system:    computeSystem,
+		waitBlock: make(chan struct{}),
+	}
+}
+
+type processModifyRequest struct {
+	Operation   string
+	ConsoleSize *consoleSize `json:",omitempty"`
+	CloseHandle *closeHandle `json:",omitempty"`
+}
+
+type consoleSize struct {
+	Height uint16
+	Width  uint16
+}
+
+type closeHandle struct {
+	Handle string
+}
+
+type processStatus struct {
+	ProcessID      uint32
+	Exited         bool
+	ExitCode       uint32
+	LastWaitResult int32
+}
+
+const stdIn string = "StdIn"
+
+const (
+	modifyConsoleSize string = "ConsoleSize"
+	modifyCloseHandle string = "CloseHandle"
+)
+
+// Pid returns the process ID of the process within the container.
+func (process *Process) Pid() int {
+	return process.processID
+}
+
+// SystemID returns the ID of the process's compute system.
+func (process *Process) SystemID() string {
+	return process.system.ID()
+}
+
+func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) {
+	switch err {
+	case nil:
+		return true, nil
+	case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound:
+		select {
+		case <-process.waitBlock:
+			// The process exit notification has already arrived.
+		default:
+			// The process should be gone, but we have not received the notification.
+			// After a second, force unblock the process wait to work around a possible
+			// deadlock in the HCS.
+			go func() {
+				time.Sleep(time.Second)
+				process.closedWaitOnce.Do(func() {
+					log.G(ctx).WithError(err).Warn("force unblocking process waits")
+					process.exitCode = -1
+					process.waitError = err
+					close(process.waitBlock)
+				})
+			}()
+		}
+		return false, nil
+	default:
+		return false, err
+	}
+}
+
+// Signal signals the process with `options`.
+//
+// For LCOW `guestrequest.SignalProcessOptionsLCOW`.
+//
+// For WCOW `guestrequest.SignalProcessOptionsWCOW`.
+func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) {
+	process.handleLock.RLock()
+	defer process.handleLock.RUnlock()
+
+	operation := "hcs::Process::Signal"
+
+	if process.handle == 0 {
+		return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
+	}
+
+	optionsb, err := json.Marshal(options)
+	if err != nil {
+		return false, err
+	}
+
+	resultJSON, err := vmcompute.HcsSignalProcess(ctx, process.handle, string(optionsb))
+	events := processHcsResult(ctx, resultJSON)
+	delivered, err := process.processSignalResult(ctx, err)
+	if err != nil {
+		err = makeProcessError(process, operation, err, events)
+	}
+	return delivered, err
+}
+
+// Kill signals the process to terminate but does not wait for it to finish terminating.
+func (process *Process) Kill(ctx context.Context) (bool, error) {
+	process.handleLock.RLock()
+	defer process.handleLock.RUnlock()
+
+	operation := "hcs::Process::Kill"
+
+	if process.handle == 0 {
+		return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
+	}
+
+	if process.killSignalDelivered {
+		// A kill signal has already been sent to this process. Sending a second
+		// one offers no real benefit, as processes cannot stop themselves from
+		// being terminated, once a TerminateProcess has been issued. Sending a
+		// second kill may result in a number of errors (two of which are detailed below)
+		// and which we can avoid handling.
+		return true, nil
+	}
+
+	resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle)
+	if err != nil {
+		// We still need to check these two cases, as processes may still be killed by an
+		// external actor (human operator, OOM, random script etc).
+		if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) {
+			// There are two cases where it should be safe to ignore an error returned
+			// by HcsTerminateProcess. The first one is caused by the fact that
+			// HcsTerminateProcess ends up calling TerminateProcess in the context
+			// of a container. According to the TerminateProcess documentation:
+			// https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks
+			// After a process has terminated, call to TerminateProcess with open
+			// handles to the process fails with ERROR_ACCESS_DENIED (5) error code.
+			// It's safe to ignore this error here. HCS should always have permissions
+			// to kill processes inside any container.
+			// So an ERROR_ACCESS_DENIED is unlikely to mean anything other than
+			// what the remarks in the documentation describe.
+			//
+			// The second case is generated by hcs itself, if for any reason HcsTerminateProcess
+			// is called twice in a very short amount of time. In such cases, hcs may return
+			// HCS_E_PROCESS_ALREADY_STOPPED.
+			return true, nil
+		}
+	}
+	events := processHcsResult(ctx, resultJSON)
+	delivered, err := process.processSignalResult(ctx, err)
+	if err != nil {
+		err = makeProcessError(process, operation, err, events)
+	}
+
+	process.killSignalDelivered = delivered
+	return delivered, err
+}
+
+// waitBackground waits for the process exit notification. Once received sets
+// `process.waitError` (if any) and unblocks all `Wait` calls.
+//
+// This MUST be called exactly once per `process.handle` but `Wait` is safe to
+// call multiple times.
+func (process *Process) waitBackground() {
+	operation := "hcs::Process::waitBackground"
+	ctx, span := trace.StartSpan(context.Background(), operation)
+	defer span.End()
+	span.AddAttributes(
+		trace.StringAttribute("cid", process.SystemID()),
+		trace.Int64Attribute("pid", int64(process.processID)))
+
+	var (
+		err            error
+		exitCode       = -1
+		propertiesJSON string
+		resultJSON     string
+	)
+
+	err = waitForNotification(ctx, process.callbackNumber, hcsNotificationProcessExited, nil)
+	if err != nil {
+		err = makeProcessError(process, operation, err, nil)
+		log.G(ctx).WithError(err).Error("failed wait")
+	} else {
+		process.handleLock.RLock()
+		defer process.handleLock.RUnlock()
+
+		// Make sure we didn't race with Close() here
+		if process.handle != 0 {
+			propertiesJSON, resultJSON, err = vmcompute.HcsGetProcessProperties(ctx, process.handle)
+			events := processHcsResult(ctx, resultJSON)
+			if err != nil {
+				err = makeProcessError(process, operation, err, events) //nolint:ineffassign
+			} else {
+				properties := &processStatus{}
+				err = json.Unmarshal([]byte(propertiesJSON), properties)
+				if err != nil {
+					err = makeProcessError(process, operation, err, nil) //nolint:ineffassign
+				} else {
+					if properties.LastWaitResult != 0 {
+						log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result")
+					} else {
+						exitCode = int(properties.ExitCode)
+					}
+				}
+			}
+		}
+	}
+	log.G(ctx).WithField("exitCode", exitCode).Debug("process exited")
+
+	process.closedWaitOnce.Do(func() {
+		process.exitCode = exitCode
+		process.waitError = err
+		close(process.waitBlock)
+	})
+	oc.SetSpanStatus(span, err)
+}
+
+// Wait waits for the process to exit. If the process has already exited, it
+// returns the previous error (if any).
+func (process *Process) Wait() error {
+	<-process.waitBlock
+	return process.waitError
+}
+
+// ResizeConsole resizes the console of the process.
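+//
+// Illustrative use (sketch): propagate a new terminal size to the process
+// (width and height are assumed to come from the caller's console):
+//
+//	if err := process.ResizeConsole(ctx, width, height); err != nil {
+//		return err
+//	}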
+func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) error {
+	process.handleLock.RLock()
+	defer process.handleLock.RUnlock()
+
+	operation := "hcs::Process::ResizeConsole"
+
+	if process.handle == 0 {
+		return makeProcessError(process, operation, ErrAlreadyClosed, nil)
+	}
+
+	modifyRequest := processModifyRequest{
+		Operation: modifyConsoleSize,
+		ConsoleSize: &consoleSize{
+			Height: height,
+			Width:  width,
+		},
+	}
+
+	modifyRequestb, err := json.Marshal(modifyRequest)
+	if err != nil {
+		return err
+	}
+
+	resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb))
+	events := processHcsResult(ctx, resultJSON)
+	if err != nil {
+		return makeProcessError(process, operation, err, events)
+	}
+
+	return nil
+}
+
+// ExitCode returns the exit code of the process. The process must have
+// already terminated.
+func (process *Process) ExitCode() (int, error) {
+	select {
+	case <-process.waitBlock:
+		if process.waitError != nil {
+			return -1, process.waitError
+		}
+		return process.exitCode, nil
+	default:
+		return -1, makeProcessError(process, "hcs::Process::ExitCode", ErrInvalidProcessState, nil)
+	}
+}
+
+// StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. Closing
+// these pipes does not close the underlying pipes. Once returned, these pipes
+// are the responsibility of the caller to close.
+func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) {
+	operation := "hcs::Process::StdioLegacy"
+	ctx, span := trace.StartSpan(context.Background(), operation)
+	defer span.End()
+	defer func() { oc.SetSpanStatus(span, err) }()
+	span.AddAttributes(
+		trace.StringAttribute("cid", process.SystemID()),
+		trace.Int64Attribute("pid", int64(process.processID)))
+
+	process.handleLock.RLock()
+	defer process.handleLock.RUnlock()
+
+	if process.handle == 0 {
+		return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
+	}
+
+	process.stdioLock.Lock()
+	defer process.stdioLock.Unlock()
+	if process.hasCachedStdio {
+		stdin, stdout, stderr := process.stdin, process.stdout, process.stderr
+		process.stdin, process.stdout, process.stderr = nil, nil, nil
+		process.hasCachedStdio = false
+		return stdin, stdout, stderr, nil
+	}
+
+	processInfo, resultJSON, err := vmcompute.HcsGetProcessInfo(ctx, process.handle)
+	events := processHcsResult(ctx, resultJSON)
+	if err != nil {
+		return nil, nil, nil, makeProcessError(process, operation, err, events)
+	}
+
+	pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError})
+	if err != nil {
+		return nil, nil, nil, makeProcessError(process, operation, err, nil)
+	}
+
+	return pipes[0], pipes[1], pipes[2], nil
+}
+
+// Stdio returns the stdin, stdout, and stderr pipes, respectively.
+// To close them, close the process handle.
+func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) {
+	process.stdioLock.Lock()
+	defer process.stdioLock.Unlock()
+	return process.stdin, process.stdout, process.stderr
+}
+
+// CloseStdin closes the write side of the stdin pipe so that the process is
+// notified on the read side that there is no more data in stdin.
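+//
+// A typical sequence (sketch; error handling elided) is to write all input,
+// signal EOF, then wait for exit:
+//
+//	stdin, _, _ := process.Stdio()
+//	io.Copy(stdin, input)
+//	process.CloseStdin(ctx)
+//	process.Wait()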
+func (process *Process) CloseStdin(ctx context.Context) error { + process.handleLock.RLock() + defer process.handleLock.RUnlock() + + operation := "hcs::Process::CloseStdin" + + if process.handle == 0 { + return makeProcessError(process, operation, ErrAlreadyClosed, nil) + } + + modifyRequest := processModifyRequest{ + Operation: modifyCloseHandle, + CloseHandle: &closeHandle{ + Handle: stdIn, + }, + } + + modifyRequestb, err := json.Marshal(modifyRequest) + if err != nil { + return err + } + + resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return makeProcessError(process, operation, err, events) + } + + process.stdioLock.Lock() + if process.stdin != nil { + process.stdin.Close() + process.stdin = nil + } + process.stdioLock.Unlock() + + return nil +} + +func (process *Process) CloseStdout(ctx context.Context) (err error) { + ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + process.handleLock.Lock() + defer process.handleLock.Unlock() + + if process.handle == 0 { + return nil + } + + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + if process.stdout != nil { + process.stdout.Close() + process.stdout = nil + } + return nil +} + +func (process *Process) CloseStderr(ctx context.Context) (err error) { + ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + process.handleLock.Lock() + defer process.handleLock.Unlock() + + if process.handle == 0 { + return nil + } + + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + if process.stderr != nil { + process.stderr.Close() + process.stderr = nil + + } + return nil +} + +// Close cleans up any state associated with the process but does not kill +// or wait on it. 
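+//
+// Shutdown-order sketch (inferred from the comments in this file, not stated
+// upstream; proc is hypothetical): kill the process, wait for the exit
+// notification, then release the handle:
+//
+//	_, _ = proc.Kill(ctx)
+//	_ = proc.Wait()
+//	_ = proc.Close()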
+func (process *Process) Close() (err error) { + operation := "hcs::Process::Close" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + + process.handleLock.Lock() + defer process.handleLock.Unlock() + + // Don't double free this + if process.handle == 0 { + return nil + } + + process.stdioLock.Lock() + if process.stdin != nil { + process.stdin.Close() + process.stdin = nil + } + if process.stdout != nil { + process.stdout.Close() + process.stdout = nil + } + if process.stderr != nil { + process.stderr.Close() + process.stderr = nil + } + process.stdioLock.Unlock() + + if err = process.unregisterCallback(ctx); err != nil { + return makeProcessError(process, operation, err, nil) + } + + if err = vmcompute.HcsCloseProcess(ctx, process.handle); err != nil { + return makeProcessError(process, operation, err, nil) + } + + process.handle = 0 + process.closedWaitOnce.Do(func() { + process.exitCode = -1 + process.waitError = ErrAlreadyClosed + close(process.waitBlock) + }) + + return nil +} + +func (process *Process) registerCallback(ctx context.Context) error { + callbackContext := ¬ificationWatcherContext{ + channels: newProcessChannels(), + systemID: process.SystemID(), + processID: process.processID, + } + + callbackMapLock.Lock() + callbackNumber := nextCallback + nextCallback++ + callbackMap[callbackNumber] = callbackContext + callbackMapLock.Unlock() + + callbackHandle, err := vmcompute.HcsRegisterProcessCallback(ctx, process.handle, notificationWatcherCallback, callbackNumber) + if err != nil { + return err + } + callbackContext.handle = callbackHandle + process.callbackNumber = callbackNumber + + return nil +} + +func (process *Process) unregisterCallback(ctx context.Context) error { + callbackNumber := process.callbackNumber + + callbackMapLock.RLock() + callbackContext := callbackMap[callbackNumber] + callbackMapLock.RUnlock() + + if callbackContext == nil { + return nil + } + + handle := callbackContext.handle + + if handle == 0 { + return nil + } + + // vmcompute.HcsUnregisterProcessCallback has its own synchronization to + // wait for all callbacks to complete. We must NOT hold the callbackMapLock. 
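+	// NOTE (an inference from the comment above, not documented upstream): the
+	// callback itself looks up callbackMap under callbackMapLock, so holding
+	// that lock here while waiting for in-flight callbacks to drain could
+	// deadlock.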
+ err := vmcompute.HcsUnregisterProcessCallback(ctx, handle) + if err != nil { + return err + } + + closeChannels(callbackContext.channels) + + callbackMapLock.Lock() + delete(callbackMap, callbackNumber) + callbackMapLock.Unlock() + + handle = 0 //nolint:ineffassign + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go new file mode 100644 index 00000000000..b621c559388 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go @@ -0,0 +1,250 @@ +package schema1 + +import ( + "encoding/json" + "time" + + "github.com/Microsoft/go-winio/pkg/guid" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +// ProcessConfig is used as both the input of Container.CreateProcess +// and to convert the parameters to JSON for passing onto the HCS +type ProcessConfig struct { + ApplicationName string `json:",omitempty"` + CommandLine string `json:",omitempty"` + CommandArgs []string `json:",omitempty"` // Used by Linux Containers on Windows + User string `json:",omitempty"` + WorkingDirectory string `json:",omitempty"` + Environment map[string]string `json:",omitempty"` + EmulateConsole bool `json:",omitempty"` + CreateStdInPipe bool `json:",omitempty"` + CreateStdOutPipe bool `json:",omitempty"` + CreateStdErrPipe bool `json:",omitempty"` + ConsoleSize [2]uint `json:",omitempty"` + CreateInUtilityVm bool `json:",omitempty"` // Used by Linux Containers on Windows + OCISpecification *json.RawMessage `json:",omitempty"` // Used by Linux Containers on Windows +} + +type Layer struct { + ID string + Path string +} + +type MappedDir struct { + HostPath string + ContainerPath string + ReadOnly bool + BandwidthMaximum uint64 + IOPSMaximum uint64 + CreateInUtilityVM bool + // LinuxMetadata - Support added in 1803/RS4+. + LinuxMetadata bool `json:",omitempty"` +} + +type MappedPipe struct { + HostPath string + ContainerPipeName string +} + +type HvRuntime struct { + ImagePath string `json:",omitempty"` + SkipTemplate bool `json:",omitempty"` + LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM + LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM + LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode + BootSource string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD + WritableBootSource bool `json:",omitempty"` // Linux Utility VM booting from VHD +} + +type MappedVirtualDisk struct { + HostPath string `json:",omitempty"` // Path to VHD on the host + ContainerPath string // Platform-specific mount point path in the container + CreateInUtilityVM bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing" + AttachOnly bool `json:",omitempty"` +} + +// AssignedDevice represents a device that has been directly assigned to a container +// +// NOTE: Support added in RS5 +type AssignedDevice struct { + // InterfaceClassGUID of the device to assign to container. 
+ InterfaceClassGUID string `json:"InterfaceClassGuid,omitempty"` +} + +// ContainerConfig is used as both the input of CreateContainer +// and to convert the parameters to JSON for passing onto the HCS +type ContainerConfig struct { + SystemType string // HCS requires this to be hard-coded to "Container" + Name string // Name of the container. We use the docker ID. + Owner string `json:",omitempty"` // The management platform that created this container + VolumePath string `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID} + IgnoreFlushesDuringBoot bool `json:",omitempty"` // Optimization hint for container startup in Windows + LayerFolderPath string `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID + Layers []Layer // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID + Credentials string `json:",omitempty"` // Credentials information + ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container. + ProcessorWeight uint64 `json:",omitempty"` // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. A value of 0 results in default shares. + ProcessorMaximum int64 `json:",omitempty"` // Specifies the portion of processor cycles that this container can use as a percentage times 100. Range is from 1 to 10000. A value of 0 results in no limit. + StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS + StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second + StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller + MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes + HostName string `json:",omitempty"` // Hostname + MappedDirectories []MappedDir `json:",omitempty"` // List of mapped directories (volumes/mounts) + MappedPipes []MappedPipe `json:",omitempty"` // List of mapped Windows named pipes + HvPartition bool // True if it is a Hyper-V Container + NetworkSharedContainerName string `json:",omitempty"` // Name (ID) of the container that we will share the network stack with. + EndpointList []string `json:",omitempty"` // List of networking endpoints to be attached to container + HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM + Servicing bool `json:",omitempty"` // True if this container is for servicing + AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution + DNSSearchList string `json:",omitempty"` // Comma-separated list of DNS suffixes to use for name resolution + ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise. + TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed + MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start + AssignedDevices []AssignedDevice `json:",omitempty"` // Array of devices to assign.
NOTE: Support added in RS5 +} + +type ComputeSystemQuery struct { + IDs []string `json:"Ids,omitempty"` + Types []string `json:",omitempty"` + Names []string `json:",omitempty"` + Owners []string `json:",omitempty"` +} + +type PropertyType string + +const ( + PropertyTypeStatistics PropertyType = "Statistics" // V1 and V2 + PropertyTypeProcessList PropertyType = "ProcessList" // V1 and V2 + PropertyTypeMappedVirtualDisk PropertyType = "MappedVirtualDisk" // Not supported in V2 schema call + PropertyTypeGuestConnection PropertyType = "GuestConnection" // V1 and V2. Nil return from HCS before RS5 +) + +type PropertyQuery struct { + PropertyTypes []PropertyType `json:",omitempty"` +} + +// ContainerProperties holds the properties for a container and the processes running in that container +type ContainerProperties struct { + ID string `json:"Id"` + State string + Name string + SystemType string + RuntimeOSType string `json:"RuntimeOsType,omitempty"` + Owner string + SiloGUID string `json:"SiloGuid,omitempty"` + RuntimeID guid.GUID `json:"RuntimeId,omitempty"` + IsRuntimeTemplate bool `json:",omitempty"` + RuntimeImagePath string `json:",omitempty"` + Stopped bool `json:",omitempty"` + ExitType string `json:",omitempty"` + AreUpdatesPending bool `json:",omitempty"` + ObRoot string `json:",omitempty"` + Statistics Statistics `json:",omitempty"` + ProcessList []ProcessListItem `json:",omitempty"` + MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"` + GuestConnectionInfo GuestConnectionInfo `json:",omitempty"` +} + +// MemoryStats holds the memory statistics for a container +type MemoryStats struct { + UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` + UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` + UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` +} + +// ProcessorStats holds the processor statistics for a container +type ProcessorStats struct { + TotalRuntime100ns uint64 `json:",omitempty"` + RuntimeUser100ns uint64 `json:",omitempty"` + RuntimeKernel100ns uint64 `json:",omitempty"` +} + +// StorageStats holds the storage statistics for a container +type StorageStats struct { + ReadCountNormalized uint64 `json:",omitempty"` + ReadSizeBytes uint64 `json:",omitempty"` + WriteCountNormalized uint64 `json:",omitempty"` + WriteSizeBytes uint64 `json:",omitempty"` +} + +// NetworkStats holds the network statistics for a container +type NetworkStats struct { + BytesReceived uint64 `json:",omitempty"` + BytesSent uint64 `json:",omitempty"` + PacketsReceived uint64 `json:",omitempty"` + PacketsSent uint64 `json:",omitempty"` + DroppedPacketsIncoming uint64 `json:",omitempty"` + DroppedPacketsOutgoing uint64 `json:",omitempty"` + EndpointId string `json:",omitempty"` + InstanceId string `json:",omitempty"` +} + +// Statistics is the structure returned by a statistics call on a container +type Statistics struct { + Timestamp time.Time `json:",omitempty"` + ContainerStartTime time.Time `json:",omitempty"` + Uptime100ns uint64 `json:",omitempty"` + Memory MemoryStats `json:",omitempty"` + Processor ProcessorStats `json:",omitempty"` + Storage StorageStats `json:",omitempty"` + Network []NetworkStats `json:",omitempty"` +} + +// ProcessList is the structure of an item returned by a ProcessList call on a container +type ProcessListItem struct { + CreateTimestamp time.Time `json:",omitempty"` + ImageName string `json:",omitempty"` + KernelTime100ns uint64 `json:",omitempty"` + MemoryCommitBytes 
uint64 `json:",omitempty"` + MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"` + MemoryWorkingSetSharedBytes uint64 `json:",omitempty"` + ProcessId uint32 `json:",omitempty"` + UserTime100ns uint64 `json:",omitempty"` +} + +// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container +type MappedVirtualDiskController struct { + MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"` +} + +// GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM +type GuestDefinedCapabilities struct { + NamespaceAddRequestSupported bool `json:",omitempty"` + SignalProcessSupported bool `json:",omitempty"` + DumpStacksSupported bool `json:",omitempty"` + DeleteContainerStateSupported bool `json:",omitempty"` + UpdateContainerSupported bool `json:",omitempty"` +} + +// GuestConnectionInfo is the structure of an item returned by a GuestConnection call on a utility VM +type GuestConnectionInfo struct { + SupportedSchemaVersions []hcsschema.Version `json:",omitempty"` + ProtocolVersion uint32 `json:",omitempty"` + GuestDefinedCapabilities GuestDefinedCapabilities `json:",omitempty"` +} + +// Type of request supported in ModifySystem +type RequestType string + +// Type of resource supported in ModifySystem +type ResourceType string + +// RequestType const +const ( + Add RequestType = "Add" + Remove RequestType = "Remove" + Network ResourceType = "Network" +) + +// ResourceModificationRequestResponse is the structure used to send a request to the container to modify the system +// Supported resource types are Network and request types are Add/Remove +type ResourceModificationRequestResponse struct { + Resource ResourceType `json:"ResourceType"` + Data interface{} `json:"Settings"` + Request RequestType `json:"RequestType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go new file mode 100644 index 00000000000..70884aad75f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go @@ -0,0 +1,36 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Attachment struct { + Type_ string `json:"Type,omitempty"` + + Path string `json:"Path,omitempty"` + + IgnoreFlushes bool `json:"IgnoreFlushes,omitempty"` + + CachingMode string `json:"CachingMode,omitempty"` + + NoWriteHardening bool `json:"NoWriteHardening,omitempty"` + + DisableExpansionOptimization bool `json:"DisableExpansionOptimization,omitempty"` + + IgnoreRelativeLocator bool `json:"IgnoreRelativeLocator,omitempty"` + + CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + SupportCompressedVolumes bool `json:"SupportCompressedVolumes,omitempty"` + + AlwaysAllowSparseFiles bool `json:"AlwaysAllowSparseFiles,omitempty"` + + ExtensibleVirtualDiskType string `json:"ExtensibleVirtualDiskType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go new file mode 100644 index 00000000000..ecbbed4c233 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/battery.go @@ -0,0 +1,13 @@
+/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Battery struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go new file mode 100644 index 00000000000..c1ea3953b58 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cache_query_stats_response.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CacheQueryStatsResponse struct { + L3OccupancyBytes int32 `json:"L3OccupancyBytes,omitempty"` + + L3TotalBwBytes int32 `json:"L3TotalBwBytes,omitempty"` + + L3LocalBwBytes int32 `json:"L3LocalBwBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go new file mode 100644 index 00000000000..ca75277a3f2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Chipset struct { + Uefi *Uefi `json:"Uefi,omitempty"` + + IsNumLockDisabled bool `json:"IsNumLockDisabled,omitempty"` + + BaseBoardSerialNumber string `json:"BaseBoardSerialNumber,omitempty"` + + ChassisSerialNumber string `json:"ChassisSerialNumber,omitempty"` + + ChassisAssetTag string `json:"ChassisAssetTag,omitempty"` + + UseUtc bool `json:"UseUtc,omitempty"` + + // LinuxKernelDirect - Added in v2.2 Builds >=181117 + LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go new file mode 100644 index 00000000000..b4f9c315b05 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/close_handle.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CloseHandle struct { + Handle string `json:"Handle,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go new file mode 100644 index 00000000000..8bf8cab60e5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/com_port.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// ComPort specifies the named pipe that will be used for the port, with empty string 
indicating a disconnected port. +type ComPort struct { + NamedPipe string `json:"NamedPipe,omitempty"` + + OptimizeForDebugger bool `json:"OptimizeForDebugger,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go new file mode 100644 index 00000000000..10cea67e042 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/compute_system.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ComputeSystem struct { + Owner string `json:"Owner,omitempty"` + + SchemaVersion *Version `json:"SchemaVersion,omitempty"` + + HostingSystemId string `json:"HostingSystemId,omitempty"` + + HostedSystem interface{} `json:"HostedSystem,omitempty"` + + Container *Container `json:"Container,omitempty"` + + VirtualMachine *VirtualMachine `json:"VirtualMachine,omitempty"` + + ShouldTerminateOnLastHandleClosed bool `json:"ShouldTerminateOnLastHandleClosed,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go new file mode 100644 index 00000000000..1d5dfe68ad6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/configuration.go @@ -0,0 +1,72 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "net/http" +) + +// contextKeys are used to identify the type of value in the context. +// Since these are strings, it is possible to get a short description of the +// context key for logging and debugging using key.String(). + +type contextKey string + +func (c contextKey) String() string { + return "auth " + string(c) +} + +var ( + // ContextOAuth2 takes an oauth2.TokenSource as authentication for the request. + ContextOAuth2 = contextKey("token") + + // ContextBasicAuth takes BasicAuth as authentication for the request. + ContextBasicAuth = contextKey("basic") + + // ContextAccessToken takes a string oauth2 access token as authentication for the request.
+ ContextAccessToken = contextKey("accesstoken") + + // ContextAPIKey takes an APIKey as authentication for the request + ContextAPIKey = contextKey("apikey") +) + +// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth +type BasicAuth struct { + UserName string `json:"userName,omitempty"` + Password string `json:"password,omitempty"` +} + +// APIKey provides API key based authentication to a request passed via context using ContextAPIKey +type APIKey struct { + Key string + Prefix string +} + +type Configuration struct { + BasePath string `json:"basePath,omitempty"` + Host string `json:"host,omitempty"` + Scheme string `json:"scheme,omitempty"` + DefaultHeader map[string]string `json:"defaultHeader,omitempty"` + UserAgent string `json:"userAgent,omitempty"` + HTTPClient *http.Client +} + +func NewConfiguration() *Configuration { + cfg := &Configuration{ + BasePath: "https://localhost", + DefaultHeader: make(map[string]string), + UserAgent: "Swagger-Codegen/2.1.0/go", + } + return cfg +} + +func (c *Configuration) AddDefaultHeader(key string, value string) { + c.DefaultHeader[key] = value +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go new file mode 100644 index 00000000000..68aa04a573e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/console_size.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ConsoleSize struct { + Height int32 `json:"Height,omitempty"` + + Width int32 `json:"Width,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go new file mode 100644 index 00000000000..39a54432c02 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go @@ -0,0 +1,36 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Container struct { + GuestOs *GuestOs `json:"GuestOs,omitempty"` + + Storage *Storage `json:"Storage,omitempty"` + + MappedDirectories []MappedDirectory `json:"MappedDirectories,omitempty"` + + MappedPipes []MappedPipe `json:"MappedPipes,omitempty"` + + Memory *Memory `json:"Memory,omitempty"` + + Processor *Processor `json:"Processor,omitempty"` + + Networking *Networking `json:"Networking,omitempty"` + + HvSocket *HvSocket `json:"HvSocket,omitempty"` + + ContainerCredentialGuard *ContainerCredentialGuardState `json:"ContainerCredentialGuard,omitempty"` + + RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` + + AssignedDevices []Device `json:"AssignedDevices,omitempty"` + + AdditionalDeviceNamespace *ContainerDefinitionDevice `json:"AdditionalDeviceNamespace,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go new file mode 100644 index 00000000000..495c6ebc8f4 --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_add_instance_request.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardAddInstanceRequest struct { + Id string `json:"Id,omitempty"` + CredentialSpec string `json:"CredentialSpec,omitempty"` + Transport string `json:"Transport,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go new file mode 100644 index 00000000000..1ed4c008f25 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_hv_socket_service_config.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardHvSocketServiceConfig struct { + ServiceId string `json:"ServiceId,omitempty"` + ServiceConfig *HvSocketServiceConfig `json:"ServiceConfig,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go new file mode 100644 index 00000000000..d7ebd0fcca1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_instance.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardInstance struct { + Id string `json:"Id,omitempty"` + CredentialGuard *ContainerCredentialGuardState `json:"CredentialGuard,omitempty"` + HvSocketConfig *ContainerCredentialGuardHvSocketServiceConfig `json:"HvSocketConfig,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go new file mode 100644 index 00000000000..71005b090be --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_modify_operation.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardModifyOperation string + +const ( + AddInstance ContainerCredentialGuardModifyOperation = "AddInstance" + RemoveInstance ContainerCredentialGuardModifyOperation = "RemoveInstance" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go new file mode 100644 
index 00000000000..952cda4965c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_operation_request.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardOperationRequest struct { + Operation ContainerCredentialGuardModifyOperation `json:"Operation,omitempty"` + OperationDetails interface{} `json:"OperationDetails,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go new file mode 100644 index 00000000000..32e5a3beed1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_remove_instance_request.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardRemoveInstanceRequest struct { + Id string `json:"Id,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go new file mode 100644 index 00000000000..0f8f644379c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_state.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardState struct { + + // Authentication cookie for calls to a Container Credential Guard instance. + Cookie string `json:"Cookie,omitempty"` + + // Name of the RPC endpoint of the Container Credential Guard instance. + RpcEndpoint string `json:"RpcEndpoint,omitempty"` + + // Transport used for the configured Container Credential Guard instance. + Transport string `json:"Transport,omitempty"` + + // Credential spec used for the configured Container Credential Guard instance. 
+ CredentialSpec string `json:"CredentialSpec,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go new file mode 100644 index 00000000000..ea306fa21ac --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_credential_guard_system_info.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerCredentialGuardSystemInfo struct { + Instances []ContainerCredentialGuardInstance `json:"Instances,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go new file mode 100644 index 00000000000..1fd7ca5d56f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container_memory_information.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// memory usage as viewed from within the container +type ContainerMemoryInformation struct { + TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"` + + TotalUsage int32 `json:"TotalUsage,omitempty"` + + CommittedBytes int32 `json:"CommittedBytes,omitempty"` + + SharedCommittedBytes int32 `json:"SharedCommittedBytes,omitempty"` + + CommitLimitBytes int32 `json:"CommitLimitBytes,omitempty"` + + PeakCommitmentBytes int32 `json:"PeakCommitmentBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go new file mode 100644 index 00000000000..90332a5190f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// CPU groups allow Hyper-V administrators to better manage and allocate the host's CPU resources across guest virtual machines +type CpuGroup struct { + Id string `json:"Id,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go new file mode 100644 index 00000000000..8794961bf5c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_affinity.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupAffinity struct { + LogicalProcessorCount int32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []int32 `json:"LogicalProcessors,omitempty"` +} diff --git 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go new file mode 100644 index 00000000000..0be0475d41a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupConfig struct { + GroupId string `json:"GroupId,omitempty"` + Affinity *CpuGroupAffinity `json:"Affinity,omitempty"` + GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"` + // Hypervisor CPU group IDs exposed to clients + HypervisorGroupId uint64 `json:"HypervisorGroupId,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go new file mode 100644 index 00000000000..3ace0ccc3b8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_configurations.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Structure used to return cpu groups for a Service property query +type CpuGroupConfigurations struct { + CpuGroups []CpuGroupConfig `json:"CpuGroups,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go new file mode 100644 index 00000000000..7d897807016 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_operations.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CPUGroupOperation string + +const ( + CreateGroup CPUGroupOperation = "CreateGroup" + DeleteGroup CPUGroupOperation = "DeleteGroup" + SetProperty CPUGroupOperation = "SetProperty" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go new file mode 100644 index 00000000000..bbad6a2c450 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type CpuGroupProperty struct { + PropertyCode uint32 `json:"PropertyCode,omitempty"` + PropertyValue uint32 `json:"PropertyValue,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go new file mode 100644 index 00000000000..91a8278fe3c --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/create_group_operation.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Create group operation settings +type CreateGroupOperation struct { + GroupId string `json:"GroupId,omitempty"` + LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []uint32 `json:"LogicalProcessors,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go new file mode 100644 index 00000000000..134bd988175 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/delete_group_operation.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Delete group operation settings +type DeleteGroupOperation struct { + GroupId string `json:"GroupId,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go new file mode 100644 index 00000000000..31c4538affc --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go @@ -0,0 +1,27 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceType string + +const ( + ClassGUID DeviceType = "ClassGuid" + DeviceInstanceID DeviceType = "DeviceInstance" + GPUMirror DeviceType = "GpuMirror" +) + +type Device struct { + // The type of device to assign to the container. + Type DeviceType `json:"Type,omitempty"` + // The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid. + InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` + // The location path of the device to assign to the container. Only used when Type is DeviceInstanceID. 
+ LocationPath string `json:"LocationPath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go new file mode 100644 index 00000000000..e985d96d228 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/devices.go @@ -0,0 +1,46 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Devices struct { + ComPorts map[string]ComPort `json:"ComPorts,omitempty"` + + Scsi map[string]Scsi `json:"Scsi,omitempty"` + + VirtualPMem *VirtualPMemController `json:"VirtualPMem,omitempty"` + + NetworkAdapters map[string]NetworkAdapter `json:"NetworkAdapters,omitempty"` + + VideoMonitor *VideoMonitor `json:"VideoMonitor,omitempty"` + + Keyboard *Keyboard `json:"Keyboard,omitempty"` + + Mouse *Mouse `json:"Mouse,omitempty"` + + HvSocket *HvSocket2 `json:"HvSocket,omitempty"` + + EnhancedModeVideo *EnhancedModeVideo `json:"EnhancedModeVideo,omitempty"` + + GuestCrashReporting *GuestCrashReporting `json:"GuestCrashReporting,omitempty"` + + VirtualSmb *VirtualSmb `json:"VirtualSmb,omitempty"` + + Plan9 *Plan9 `json:"Plan9,omitempty"` + + Battery *Battery `json:"Battery,omitempty"` + + FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"` + + SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"` + + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + VirtualPci map[string]VirtualPciDevice `json:",omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go new file mode 100644 index 00000000000..85450c41e10 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/enhanced_mode_video.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type EnhancedModeVideo struct { + ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go new file mode 100644 index 00000000000..fe86cab6556 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/flexible_io_device.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type FlexibleIoDevice struct { + EmulatorId string `json:"EmulatorId,omitempty"` + + HostingModel string `json:"HostingModel,omitempty"` + + Configuration []string `json:"Configuration,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go new file mode 100644 index 00000000000..7db29495b3e --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestConnection struct { + + // Use Vsock rather than Hyper-V sockets to communicate with the guest service. + UseVsock bool `json:"UseVsock,omitempty"` + + // Don't disconnect the guest connection when pausing the virtual machine. + UseConnectedSuspend bool `json:"UseConnectedSuspend,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go new file mode 100644 index 00000000000..8a369bab71c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_connection_info.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Information about the guest. +type GuestConnectionInfo struct { + + // Each schema version x.y stands for the range of versions a.b where a==x and b<=y. This list comes from the SupportedSchemaVersions field in GcsCapabilities. + SupportedSchemaVersions []Version `json:"SupportedSchemaVersions,omitempty"` + + ProtocolVersion int32 `json:"ProtocolVersion,omitempty"` + + GuestDefinedCapabilities *interface{} `json:"GuestDefinedCapabilities,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go new file mode 100644 index 00000000000..af828004835 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_crash_reporting.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestCrashReporting struct { + WindowsCrashSettings *WindowsCrashReporting `json:"WindowsCrashSettings,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go new file mode 100644 index 00000000000..8838519a39c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_os.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestOs struct { + HostName string `json:"HostName,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go new file mode 100644 index 00000000000..ef1eec88656 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen 
https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type GuestState struct { + + // The path to an existing file used for persistent guest state storage. An empty string indicates the system should initialize new transient, in-memory guest state. + GuestStateFilePath string `json:"GuestStateFilePath,omitempty"` + + // The path to an existing file for persistent runtime state storage. An empty string indicates the system should initialize new transient, in-memory runtime state. + RuntimeStateFilePath string `json:"RuntimeStateFilePath,omitempty"` + + // If true, the guest state and runtime state files will be used as templates to populate transient, in-memory state instead of using the files as persistent backing store. + ForceTransientState bool `json:"ForceTransientState,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go new file mode 100644 index 00000000000..2238ce5306c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/host_processor_modify_request.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Structure used to request a service processor modification +type HostProcessorModificationRequest struct { + Operation CPUGroupOperation `json:"Operation,omitempty"` + OperationDetails interface{} `json:"OperationDetails,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go new file mode 100644 index 00000000000..ea3084bca7f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hosted_system.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HostedSystem struct { + SchemaVersion *Version `json:"SchemaVersion,omitempty"` + + Container *Container `json:"Container,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go new file mode 100644 index 00000000000..23b2ee9e7d4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HvSocket struct { + Config *HvSocketSystemConfig `json:"Config,omitempty"` + + EnablePowerShellDirect bool `json:"EnablePowerShellDirect,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go new file mode 100644 index 00000000000..a017691f02d --- /dev/null +++
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_2.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// HvSocket configuration for a VM +type HvSocket2 struct { + HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go new file mode 100644 index 00000000000..84c11b93ee5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_address.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// This class defines address settings applied to a VM +// by the GCS every time a VM starts or restores. +type HvSocketAddress struct { + LocalAddress string `json:"LocalAddress,omitempty"` + ParentAddress string `json:"ParentAddress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go new file mode 100644 index 00000000000..ecd9f7fbac2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_service_config.go @@ -0,0 +1,28 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type HvSocketServiceConfig struct { + + // SDDL string that HvSocket will check before allowing a host process to bind to this specific service. If not specified, defaults to the system DefaultBindSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. + BindSecurityDescriptor string `json:"BindSecurityDescriptor,omitempty"` + + // SDDL string that HvSocket will check before allowing a host process to connect to this specific service. If not specified, defaults to the system DefaultConnectSecurityDescriptor, defined in HvSocketSystemWpConfig in V1. + ConnectSecurityDescriptor string `json:"ConnectSecurityDescriptor,omitempty"` + + // If true, HvSocket will process wildcard binds for this service/system combination. Wildcard binds are secured in the registry at SOFTWARE/Microsoft/Windows NT/CurrentVersion/Virtualization/HvSocket/WildcardDescriptors + AllowWildcardBinds bool `json:"AllowWildcardBinds,omitempty"` + + // Disabled controls whether the HvSocket service is accepting connection requests. + // Setting this to true will make the service refuse all incoming connections and cancel + // any connections already established. The service itself will still be active, however, + // and can be re-enabled at a future time.
+ Disabled bool `json:"Disabled,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go new file mode 100644 index 00000000000..69f4f9d39b9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/hv_socket_system_config.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// This is the HCS Schema version of the HvSocket configuration. The VMWP version is located in Config.Devices.IC in V1. +type HvSocketSystemConfig struct { + + // SDDL string that HvSocket will check before allowing a host process to bind to an unlisted service for this specific container/VM (not wildcard binds). + DefaultBindSecurityDescriptor string `json:"DefaultBindSecurityDescriptor,omitempty"` + + // SDDL string that HvSocket will check before allowing a host process to connect to an unlisted service in the VM/container. + DefaultConnectSecurityDescriptor string `json:"DefaultConnectSecurityDescriptor,omitempty"` + + ServiceTable map[string]HvSocketServiceConfig `json:"ServiceTable,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go new file mode 100644 index 00000000000..a614d63bd72 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/interrupt_moderation_mode.go @@ -0,0 +1,42 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type InterruptModerationName string + +// The valid interrupt moderation modes for I/O virtualization (IOV) offloading. 
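+// (Editor's illustrative note, not generated code.) The InterruptModerationValueToName map defined below translates a numeric setting back to its schema name, e.g.: +// +// name := InterruptModerationValueToName[MediumValue] // "Medium"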
+const ( + DefaultName InterruptModerationName = "Default" + AdaptiveName InterruptModerationName = "Adaptive" + OffName InterruptModerationName = "Off" + LowName InterruptModerationName = "Low" + MediumName InterruptModerationName = "Medium" + HighName InterruptModerationName = "High" +) + +type InterruptModerationValue uint32 + +const ( + DefaultValue InterruptModerationValue = iota + AdaptiveValue + OffValue + LowValue InterruptModerationValue = 100 + MediumValue InterruptModerationValue = 200 + HighValue InterruptModerationValue = 300 +) + +var InterruptModerationValueToName = map[InterruptModerationValue]InterruptModerationName{ + DefaultValue: DefaultName, + AdaptiveValue: AdaptiveName, + OffValue: OffName, + LowValue: LowName, + MediumValue: MediumName, + HighValue: HighName, +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go new file mode 100644 index 00000000000..2a55cc37cd3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/iov_settings.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type IovSettings struct { + // The weight assigned to this port for I/O virtualization (IOV) offloading. + // Setting this to 0 disables IOV offloading. + OffloadWeight *uint32 `json:"OffloadWeight,omitempty"` + + // The number of queue pairs requested for this port for I/O virtualization (IOV) offloading. + QueuePairsRequested *uint32 `json:"QueuePairsRequested,omitempty"` + + // The interrupt moderation mode for I/O virtualization (IOV) offloading. 
+ InterruptModeration *InterruptModerationName `json:"InterruptModeration,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go new file mode 100644 index 00000000000..3d3fa3b1c73 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/keyboard.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Keyboard struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go new file mode 100644 index 00000000000..176c49d4959 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Layer struct { + Id string `json:"Id,omitempty"` + + Path string `json:"Path,omitempty"` + + PathType string `json:"PathType,omitempty"` + + // Unspecified defaults to Enabled + Cache string `json:"Cache,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go new file mode 100644 index 00000000000..0ab6c280fc8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/linux_kernel_direct.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.2 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type LinuxKernelDirect struct { + KernelFilePath string `json:"KernelFilePath,omitempty"` + + InitRdPath string `json:"InitRdPath,omitempty"` + + KernelCmdLine string `json:"KernelCmdLine,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go new file mode 100644 index 00000000000..2e3aa5e1750 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/logical_processor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type LogicalProcessor struct { + LpIndex uint32 `json:"LpIndex,omitempty"` + NodeNumber uint8 `json:"NodeNumber,omitempty"` + PackageId uint32 `json:"PackageId,omitempty"` + CoreId uint32 `json:"CoreId,omitempty"` + RootVpIndex int32 `json:"RootVpIndex,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go new file mode 100644 index 00000000000..9b86a40457f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_directory.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description 
provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MappedDirectory struct { + HostPath string `json:"HostPath,omitempty"` + + HostPathType string `json:"HostPathType,omitempty"` + + ContainerPath string `json:"ContainerPath,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go new file mode 100644 index 00000000000..208074e9a25 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mapped_pipe.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MappedPipe struct { + ContainerPipeName string `json:"ContainerPipeName,omitempty"` + + HostPath string `json:"HostPath,omitempty"` + + HostPathType string `json:"HostPathType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go new file mode 100644 index 00000000000..30749c67249 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Memory struct { + SizeInMB uint64 `json:"SizeInMB,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go new file mode 100644 index 00000000000..71224c75b9d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go @@ -0,0 +1,49 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Memory2 struct { + SizeInMB uint64 `json:"SizeInMB,omitempty"` + + AllowOvercommit bool `json:"AllowOvercommit,omitempty"` + + EnableHotHint bool `json:"EnableHotHint,omitempty"` + + EnableColdHint bool `json:"EnableColdHint,omitempty"` + + EnableEpf bool `json:"EnableEpf,omitempty"` + + // EnableDeferredCommit is private in the schema. If regenerated need to add back. + EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"` + + // EnableColdDiscardHint, if enabled, exposes the memory cold discard hint feature + // to the VM, allowing it to trim non-zeroed pages from the working set (if supported by + // the guest operating system). + EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"` + + // LowMmioGapInMB is the low MMIO region allocated below 4GB. + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"` + + // HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and + // size).
+ // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"` + + // HighMmioGapInMB is the high MMIO region. + // + // TODO: This is pre-release support in schema 2.3. Need to add build number + // docs when a public build with this is out. + HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go new file mode 100644 index 00000000000..811779b04b2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_information_for_vm.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type MemoryInformationForVm struct { + VirtualNodeCount uint32 `json:"VirtualNodeCount,omitempty"` + + VirtualMachineMemory *VmMemory `json:"VirtualMachineMemory,omitempty"` + + VirtualNodes []VirtualNodeInfo `json:"VirtualNodes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go new file mode 100644 index 00000000000..906ba597f9f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_stats.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Memory runtime statistics +type MemoryStats struct { + MemoryUsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"` + + MemoryUsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"` + + MemoryUsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go new file mode 100644 index 00000000000..8dbe40b3be2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerDefinitionDevice struct { + DeviceExtension []DeviceExtension `json:"device_extension,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go new file mode 100644 index 00000000000..8fe89f92747 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen 
(https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceCategory struct { + Name string `json:"name,omitempty"` + InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go new file mode 100644 index 00000000000..a62568d892e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtension struct { + DeviceCategory *DeviceCategory `json:"device_category,omitempty"` + Namespace *DeviceExtensionNamespace `json:"namespace,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go new file mode 100644 index 00000000000..a7410febd6d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceInstance struct { + Id string `json:"id,omitempty"` + LocationPath string `json:"location_path,omitempty"` + PortName string `json:"port_name,omitempty"` + InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go new file mode 100644 index 00000000000..3553640647e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceNamespace struct { + RequiresDriverstore bool `json:"requires_driverstore,omitempty"` + DeviceCategory []DeviceCategory `json:"device_category,omitempty"` + DeviceInstance []DeviceInstance `json:"device_instance,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go new file mode 100644 index 00000000000..7be98b54107 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type InterfaceClass struct { + Type_ string `json:"type,omitempty"` + Identifier string `json:"identifier,omitempty"` + Recurse bool `json:"recurse,omitempty"` +} diff --git 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go new file mode 100644 index 00000000000..3ab9cf1ecf0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtensionNamespace struct { + Ob *ObjectNamespace `json:"ob,omitempty"` + Device *DeviceNamespace `json:"device,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go new file mode 100644 index 00000000000..d2f51b3b53c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectDirectory struct { + Name string `json:"name,omitempty"` + Clonesd string `json:"clonesd,omitempty"` + Shadow string `json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go new file mode 100644 index 00000000000..47dfb55bfa8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectNamespace struct { + Shadow string `json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go new file mode 100644 index 00000000000..8867ebe5f02 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectSymlink struct { + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Scope string `json:"scope,omitempty"` + Pathtoclone string `json:"pathtoclone,omitempty"` + AccessMask int32 `json:"access_mask,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go new file mode 100644 index 00000000000..1384ed88821 --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modification_request.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ModificationRequest struct { + PropertyType PropertyType `json:"PropertyType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go new file mode 100644 index 00000000000..d29455a3e43 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ModifySettingRequest struct { + ResourcePath string `json:"ResourcePath,omitempty"` + + RequestType string `json:"RequestType,omitempty"` + + Settings interface{} `json:"Settings,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated + + GuestRequest interface{} `json:"GuestRequest,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go new file mode 100644 index 00000000000..ccf8b938f3a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/mouse.go @@ -0,0 +1,13 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Mouse struct { +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go new file mode 100644 index 00000000000..7408abd317d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/network_adapter.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type NetworkAdapter struct { + EndpointId string `json:"EndpointId,omitempty"` + MacAddress string `json:"MacAddress,omitempty"` + // The I/O virtualization (IOV) offloading configuration. 
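+ // (Editor's illustrative sketch, not generated code; the weight value is arbitrary.) + // Enabling IOV offloading on an adapter might look like: + // + // weight := uint32(100) + // adapter := NetworkAdapter{IovSettings: &IovSettings{OffloadWeight: &weight}}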
+ IovSettings *IovSettings `json:"IovSettings,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go new file mode 100644 index 00000000000..e5ea187a295 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/networking.go @@ -0,0 +1,23 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Networking struct { + AllowUnqualifiedDnsQuery bool `json:"AllowUnqualifiedDnsQuery,omitempty"` + + DnsSearchList string `json:"DnsSearchList,omitempty"` + + NetworkSharedContainerName string `json:"NetworkSharedContainerName,omitempty"` + + // GUID in Windows; string in Linux + Namespace string `json:"Namespace,omitempty"` + + NetworkAdapters []string `json:"NetworkAdapters,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go new file mode 100644 index 00000000000..d96c9501f33 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_notification.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Notification data that is indicated to components running in the Virtual Machine. +type PauseNotification struct { + Reason string `json:"Reason,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go new file mode 100644 index 00000000000..21707a88eb7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/pause_options.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Options for HcsPauseComputeSystem +type PauseOptions struct { + SuspensionLevel string `json:"SuspensionLevel,omitempty"` + + HostedNotification *PauseNotification `json:"HostedNotification,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go new file mode 100644 index 00000000000..29d8c8012ff --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Plan9 struct { + Shares []Plan9Share `json:"Shares,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go new file mode 100644 index 00000000000..41f8fdea029 --- /dev/null +++
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/plan9_share.go @@ -0,0 +1,34 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Plan9Share struct { + Name string `json:"Name,omitempty"` + + // The name by which the guest operating system can access this share, via the aname parameter in the Plan9 protocol. + AccessName string `json:"AccessName,omitempty"` + + Path string `json:"Path,omitempty"` + + Port int32 `json:"Port,omitempty"` + + // Flags are marked private. Until they are exported correctly, the values are: + // + // ReadOnly 0x00000001 + // LinuxMetadata 0x00000004 + // CaseSensitive 0x00000008 + Flags int32 `json:"Flags,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` + + AllowedFiles []string `json:"AllowedFiles,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go new file mode 100644 index 00000000000..e9a662dd59d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_details.go @@ -0,0 +1,33 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "time" +) + +// Information about a process running in a container +type ProcessDetails struct { + ProcessId int32 `json:"ProcessId,omitempty"` + + ImageName string `json:"ImageName,omitempty"` + + CreateTimestamp time.Time `json:"CreateTimestamp,omitempty"` + + UserTime100ns int32 `json:"UserTime100ns,omitempty"` + + KernelTime100ns int32 `json:"KernelTime100ns,omitempty"` + + MemoryCommitBytes int32 `json:"MemoryCommitBytes,omitempty"` + + MemoryWorkingSetPrivateBytes int32 `json:"MemoryWorkingSetPrivateBytes,omitempty"` + + MemoryWorkingSetSharedBytes int32 `json:"MemoryWorkingSetSharedBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go new file mode 100644 index 00000000000..e4ed095c7be --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_modify_request.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Passed to HcsRpc_ModifyProcess +type ProcessModifyRequest struct { + Operation string `json:"Operation,omitempty"` + + ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"` + + CloseHandle *CloseHandle `json:"CloseHandle,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go new file mode 100644 index 00000000000..82b0d0532b2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_parameters.go @@ -0,0 +1,46 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen
https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ProcessParameters struct { + ApplicationName string `json:"ApplicationName,omitempty"` + + CommandLine string `json:"CommandLine,omitempty"` + + // optional alternative to CommandLine, currently only supported by Linux GCS + CommandArgs []string `json:"CommandArgs,omitempty"` + + User string `json:"User,omitempty"` + + WorkingDirectory string `json:"WorkingDirectory,omitempty"` + + Environment map[string]string `json:"Environment,omitempty"` + + // if set, will run as low-privilege process + RestrictedToken bool `json:"RestrictedToken,omitempty"` + + // if set, ignore StdErrPipe + EmulateConsole bool `json:"EmulateConsole,omitempty"` + + CreateStdInPipe bool `json:"CreateStdInPipe,omitempty"` + + CreateStdOutPipe bool `json:"CreateStdOutPipe,omitempty"` + + CreateStdErrPipe bool `json:"CreateStdErrPipe,omitempty"` + + // height then width + ConsoleSize []int32 `json:"ConsoleSize,omitempty"` + + // if set, find an existing session for the user and create the process in it + UseExistingLogin bool `json:"UseExistingLogin,omitempty"` + + // if set, use the legacy console instead of conhost + UseLegacyConsole bool `json:"UseLegacyConsole,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go new file mode 100644 index 00000000000..ad9a4fa9ad6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/process_status.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Status of a process running in a container +type ProcessStatus struct { + ProcessId int32 `json:"ProcessId,omitempty"` + + Exited bool `json:"Exited,omitempty"` + + ExitCode int32 `json:"ExitCode,omitempty"` + + LastWaitResult int32 `json:"LastWaitResult,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go new file mode 100644 index 00000000000..bb24e88da1a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Processor struct { + Count int32 `json:"Count,omitempty"` + + Maximum int32 `json:"Maximum,omitempty"` + + Weight int32 `json:"Weight,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go new file mode 100644 index 00000000000..c64f335ec7d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go @@ -0,0 +1,23 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.5 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + 
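+// (Editor's note: an illustrative sketch, not part of the generated schema; the command line is hypothetical.) A minimal ProcessParameters value, defined above, for launching a process with stdio pipes: +// +// params := ProcessParameters{ +// CommandLine: "cmd /c echo hello", +// CreateStdInPipe: true, +// CreateStdOutPipe: true, +// CreateStdErrPipe: true, +// } +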
+type Processor2 struct { + Count int32 `json:"Count,omitempty"` + + Limit int32 `json:"Limit,omitempty"` + + Weight int32 `json:"Weight,omitempty"` + + ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"` + + // An optional object that configures the CPU Group to which a Virtual Machine will bind. + CpuGroup *CpuGroup `json:"CpuGroup,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go new file mode 100644 index 00000000000..6157e252256 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_stats.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// CPU runtime statistics +type ProcessorStats struct { + TotalRuntime100ns uint64 `json:"TotalRuntime100ns,omitempty"` + + RuntimeUser100ns uint64 `json:"RuntimeUser100ns,omitempty"` + + RuntimeKernel100ns uint64 `json:"RuntimeKernel100ns,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go new file mode 100644 index 00000000000..885156e77fa --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_topology.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ProcessorTopology struct { + LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"` + LogicalProcessors []LogicalProcessor `json:"LogicalProcessors,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go new file mode 100644 index 00000000000..17558cba0f2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go @@ -0,0 +1,54 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + v1 "github.com/containerd/cgroups/stats/v1" +) + +type Properties struct { + Id string `json:"Id,omitempty"` + + SystemType string `json:"SystemType,omitempty"` + + RuntimeOsType string `json:"RuntimeOsType,omitempty"` + + Name string `json:"Name,omitempty"` + + Owner string `json:"Owner,omitempty"` + + RuntimeId string `json:"RuntimeId,omitempty"` + + RuntimeTemplateId string `json:"RuntimeTemplateId,omitempty"` + + State string `json:"State,omitempty"` + + Stopped bool `json:"Stopped,omitempty"` + + ExitType string `json:"ExitType,omitempty"` + + Memory *MemoryInformationForVm `json:"Memory,omitempty"` + + Statistics *Statistics `json:"Statistics,omitempty"` + + ProcessList []ProcessDetails `json:"ProcessList,omitempty"` + + TerminateOnLastHandleClosed bool `json:"TerminateOnLastHandleClosed,omitempty"` + + HostingSystemId string `json:"HostingSystemId,omitempty"` + + SharedMemoryRegionInfo
[]SharedMemoryRegionInfo `json:"SharedMemoryRegionInfo,omitempty"` + + GuestConnectionInfo *GuestConnectionInfo `json:"GuestConnectionInfo,omitempty"` + + // Metrics is not part of the API for HCS but this is used for LCOW v2 to + // return the full cgroup metrics from the guest. + Metrics *v1.Metrics `json:"LCOWMetrics,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go new file mode 100644 index 00000000000..d6d80df1314 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_query.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// By default the basic properties will be returned. This query provides a way to request specific properties. +type PropertyQuery struct { + PropertyTypes []PropertyType `json:"PropertyTypes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go new file mode 100644 index 00000000000..98f2c96edbd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type PropertyType string + +const ( + PTMemory PropertyType = "Memory" + PTGuestMemory PropertyType = "GuestMemory" + PTStatistics PropertyType = "Statistics" + PTProcessList PropertyType = "ProcessList" + PTTerminateOnLastHandleClosed PropertyType = "TerminateOnLastHandleClosed" + PTSharedMemoryRegion PropertyType = "SharedMemoryRegion" + PTContainerCredentialGuard PropertyType = "ContainerCredentialGuard" // This field is not generated by swagger. This was added manually. 
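+ // (Editor's illustrative note, not generated code.) These constants are used with + // PropertyQuery above, e.g.: + // + // query := PropertyQuery{PropertyTypes: []PropertyType{PTMemory, PTStatistics}}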
+ PTGuestConnection PropertyType = "GuestConnection" + PTICHeartbeatStatus PropertyType = "ICHeartbeatStatus" + PTProcessorTopology PropertyType = "ProcessorTopology" + PTCPUGroup PropertyType = "CpuGroup" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go new file mode 100644 index 00000000000..8d5f5c1719e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/rdp_connection_options.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RdpConnectionOptions struct { + AccessSids []string `json:"AccessSids,omitempty"` + + NamedPipe string `json:"NamedPipe,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go new file mode 100644 index 00000000000..006906f6e2f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_changes.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryChanges struct { + AddValues []RegistryValue `json:"AddValues,omitempty"` + + DeleteKeys []RegistryKey `json:"DeleteKeys,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go new file mode 100644 index 00000000000..26fde99c74c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryKey struct { + Hive string `json:"Hive,omitempty"` + + Name string `json:"Name,omitempty"` + + Volatile bool `json:"Volatile,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go new file mode 100644 index 00000000000..3f203176c32 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go @@ -0,0 +1,30 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RegistryValue struct { + Key *RegistryKey `json:"Key,omitempty"` + + Name string `json:"Name,omitempty"` + + Type_ string `json:"Type,omitempty"` + + // One and only one value type must be set. 
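+ // For example (editor's illustrative sketch, not generated code), a string value + // sets only StringValue: + // + // rv := RegistryValue{Name: "Setting", Type_: "String", StringValue: "enabled"}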
+ StringValue string `json:"StringValue,omitempty"` + + BinaryValue string `json:"BinaryValue,omitempty"` + + DWordValue int32 `json:"DWordValue,omitempty"` + + QWordValue int32 `json:"QWordValue,omitempty"` + + // Only used if RegistryValueType is CustomType. The data is in BinaryValue. + CustomType int32 `json:"CustomType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go new file mode 100644 index 00000000000..778ff58735a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/restore_state.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type RestoreState struct { + + // The path to the save state file to restore the system from. + SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` + + // The ID of the template system to clone this new system off of. An empty string indicates the system should not be cloned from a template. + TemplateSystemId string `json:"TemplateSystemId,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go new file mode 100644 index 00000000000..e55fa1d98a5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/save_options.go @@ -0,0 +1,19 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SaveOptions struct { + + // The type of save operation to be performed. + SaveType string `json:"SaveType,omitempty"` + + // The path to the file that will contain the saved state. + SaveStateFilePath string `json:"SaveStateFilePath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go new file mode 100644 index 00000000000..bf253a470b6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/scsi.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Scsi struct { + + // Map of attachments, where the key is the integer LUN number on the controller.
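+ // For example (editor's illustrative sketch, not generated code; Attachment is + // defined elsewhere in this package and the VHD path is hypothetical): + // + // scsi := Scsi{Attachments: map[string]Attachment{"0": {Path: `C:\vm\scratch.vhdx`}}}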
+ Attachments map[string]Attachment `json:"Attachments,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go new file mode 100644 index 00000000000..b8142ca6a61 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/service_properties.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import "encoding/json" + +type ServiceProperties struct { + // Changed Properties field to []json.RawMessage from []interface{} to avoid having to + // remarshal sp.Properties[n] and unmarshal into the type(s) we want. + Properties []json.RawMessage `json:"Properties,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go new file mode 100644 index 00000000000..df9baa9219a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_configuration.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryConfiguration struct { + Regions []SharedMemoryRegion `json:"Regions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go new file mode 100644 index 00000000000..825b71865d7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryRegion struct { + SectionName string `json:"SectionName,omitempty"` + + StartOffset int32 `json:"StartOffset,omitempty"` + + Length int32 `json:"Length,omitempty"` + + AllowGuestWrite bool `json:"AllowGuestWrite,omitempty"` + + HiddenFromGuest bool `json:"HiddenFromGuest,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go new file mode 100644 index 00000000000..f67b08eb57a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/shared_memory_region_info.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SharedMemoryRegionInfo struct { + SectionName string `json:"SectionName,omitempty"` + + GuestPhysicalAddress int32 `json:"GuestPhysicalAddress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go new file mode 100644 index 00000000000..5eaf6a7f4a2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/silo_properties.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Silo job information +type SiloProperties struct { + Enabled bool `json:"Enabled,omitempty"` + + JobName string `json:"JobName,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go new file mode 100644 index 00000000000..ba7a6b3963b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/statistics.go @@ -0,0 +1,29 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +import ( + "time" +) + +// Runtime statistics for a container +type Statistics struct { + Timestamp time.Time `json:"Timestamp,omitempty"` + + ContainerStartTime time.Time `json:"ContainerStartTime,omitempty"` + + Uptime100ns uint64 `json:"Uptime100ns,omitempty"` + + Processor *ProcessorStats `json:"Processor,omitempty"` + + Memory *MemoryStats `json:"Memory,omitempty"` + + Storage *StorageStats `json:"Storage,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go new file mode 100644 index 00000000000..2627af91323 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Storage struct { + + // List of layers that describe the parent hierarchy for a container's storage. These layers combined together, presented as a disposable and/or committable working storage, are used by the container to record all changes done to the parent layers. + Layers []Layer `json:"Layers,omitempty"` + + // Path that points to the scratch space of a container, where parent layers are combined together to present a new disposable and/or committable layer with the changes done during its runtime. 
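+ // (Editor's illustrative sketch, not generated code; the layer and scratch paths are hypothetical.) + // + // storage := Storage{Layers: []Layer{{Id: "base", Path: `C:\layers\base`}}, Path: `C:\scratch`}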
+ Path string `json:"Path,omitempty"` + + QoS *StorageQoS `json:"QoS,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go new file mode 100644 index 00000000000..9c5e6eb5323 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_qo_s.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type StorageQoS struct { + IopsMaximum int32 `json:"IopsMaximum,omitempty"` + + BandwidthMaximum int32 `json:"BandwidthMaximum,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go new file mode 100644 index 00000000000..4f042ffd937 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/storage_stats.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// Storage runtime statistics +type StorageStats struct { + ReadCountNormalized uint64 `json:"ReadCountNormalized,omitempty"` + + ReadSizeBytes uint64 `json:"ReadSizeBytes,omitempty"` + + WriteCountNormalized uint64 `json:"WriteCountNormalized,omitempty"` + + WriteSizeBytes uint64 `json:"WriteSizeBytes,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go new file mode 100644 index 00000000000..83486994036 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Topology struct { + Memory *Memory2 `json:"Memory,omitempty"` + + Processor *Processor2 `json:"Processor,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go new file mode 100644 index 00000000000..0e48ece500c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Uefi struct { + EnableDebugger bool `json:"EnableDebugger,omitempty"` + + SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"` + + BootThis *UefiBootEntry `json:"BootThis,omitempty"` + + Console string `json:"Console,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go new file mode 100644 index 00000000000..3ab409d825e --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi_boot_entry.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type UefiBootEntry struct { + DeviceType string `json:"DeviceType,omitempty"` + + DevicePath string `json:"DevicePath,omitempty"` + + DiskNumber int32 `json:"DiskNumber,omitempty"` + + OptionalData string `json:"OptionalData,omitempty"` + + VmbFsRootPath string `json:"VmbFsRootPath,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go new file mode 100644 index 00000000000..2abfccca315 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/version.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Version struct { + Major int32 `json:"Major,omitempty"` + + Minor int32 `json:"Minor,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go new file mode 100644 index 00000000000..ec5d0fb936d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/video_monitor.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VideoMonitor struct { + HorizontalResolution int32 `json:"HorizontalResolution,omitempty"` + + VerticalResolution int32 `json:"VerticalResolution,omitempty"` + + ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go new file mode 100644 index 00000000000..2d22b1bcb08 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go @@ -0,0 +1,32 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualMachine struct { + + // StopOnReset is private in the schema. If regenerated need to put back. 
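+ // (Editor's illustrative note, not generated code.) A minimal compute topology wiring + // these types together might look like: + // + // vm := VirtualMachine{ComputeTopology: &Topology{Memory: &Memory2{SizeInMB: 1024}, Processor: &Processor2{Count: 2}}}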
+ StopOnReset bool `json:"StopOnReset,omitempty"` + + Chipset *Chipset `json:"Chipset,omitempty"` + + ComputeTopology *Topology `json:"ComputeTopology,omitempty"` + + Devices *Devices `json:"Devices,omitempty"` + + GuestState *GuestState `json:"GuestState,omitempty"` + + RestoreState *RestoreState `json:"RestoreState,omitempty"` + + RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` + + StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` + + GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go new file mode 100644 index 00000000000..91a3c83d4ff --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_node_info.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualNodeInfo struct { + VirtualNodeIndex int32 `json:"VirtualNodeIndex,omitempty"` + + PhysicalNodeNumber int32 `json:"PhysicalNodeNumber,omitempty"` + + VirtualProcessorCount int32 `json:"VirtualProcessorCount,omitempty"` + + MemoryUsageInPages int32 `json:"MemoryUsageInPages,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go new file mode 100644 index 00000000000..f5b7f3e38c0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_controller.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemController struct { + Devices map[string]VirtualPMemDevice `json:"Devices,omitempty"` + + MaximumCount uint32 `json:"MaximumCount,omitempty"` + + MaximumSizeBytes uint64 `json:"MaximumSizeBytes,omitempty"` + + Backing string `json:"Backing,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go new file mode 100644 index 00000000000..70cf2d90de0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_device.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemDevice struct { + HostPath string `json:"HostPath,omitempty"` + + ReadOnly bool `json:"ReadOnly,omitempty"` + + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go new file mode 100644 index 00000000000..9ef322f615b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger 
Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemMapping struct { + HostPath string `json:"HostPath,omitempty"` + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go new file mode 100644 index 00000000000..f5e05903c54 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.3 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// TODO: This is pre-release support in schema 2.3. Need to add build number +// docs when a public build with this is out. +type VirtualPciDevice struct { + Functions []VirtualPciFunction `json:",omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go new file mode 100644 index 00000000000..cedb7d18bc2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_function.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.3 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// TODO: This is pre-release support in schema 2.3. Need to add build number +// docs when a public build with this is out. 
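// Illustrative sketch only, not part of the vendored file: a device
// assignment request populates Functions with the host's PCI instance path;
// the path below is made up.
//
//	dev := hcsschema.VirtualPciDevice{
//		Functions: []hcsschema.VirtualPciFunction{
//			{DeviceInstancePath: `PCIROOT(0)#PCI(0300)#PCI(0000)`},
//		},
//	}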
+type VirtualPciFunction struct { + DeviceInstancePath string `json:",omitempty"` + + VirtualFunction uint16 `json:",omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go new file mode 100644 index 00000000000..362df363e13 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmb struct { + Shares []VirtualSmbShare `json:"Shares,omitempty"` + + DirectFileMappingInMB int64 `json:"DirectFileMappingInMB,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go new file mode 100644 index 00000000000..915e9b6386a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share.go @@ -0,0 +1,20 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmbShare struct { + Name string `json:"Name,omitempty"` + + Path string `json:"Path,omitempty"` + + AllowedFiles []string `json:"AllowedFiles,omitempty"` + + Options *VirtualSmbShareOptions `json:"Options,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go new file mode 100644 index 00000000000..75196bd8c8d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_smb_share_options.go @@ -0,0 +1,62 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualSmbShareOptions struct { + ReadOnly bool `json:"ReadOnly,omitempty"` + + // convert exclusive access to shared read access + ShareRead bool `json:"ShareRead,omitempty"` + + // all opens will use cached I/O + CacheIo bool `json:"CacheIo,omitempty"` + + // disable oplock support + NoOplocks bool `json:"NoOplocks,omitempty"` + + // Acquire the backup privilege when attempting to open + TakeBackupPrivilege bool `json:"TakeBackupPrivilege,omitempty"` + + // Use the identity of the share root when opening + UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"` + + // disable Direct Mapping + NoDirectmap bool `json:"NoDirectmap,omitempty"` + + // disable byte-range locks + NoLocks bool `json:"NoLocks,omitempty"` + + // disable Directory Change Notifications + NoDirnotify bool `json:"NoDirnotify,omitempty"` + + // share is used for VM shared memory + VmSharedMemory bool `json:"VmSharedMemory,omitempty"` + + // allow access only to the files specified in AllowedFiles + RestrictFileAccess bool `json:"RestrictFileAccess,omitempty"` + + // disable all oplocks except Level II + ForceLevelIIOplocks bool `json:"ForceLevelIIOplocks,omitempty"` + + // Allow the host to reparse this base
layer + ReparseBaseLayer bool `json:"ReparseBaseLayer,omitempty"` + + // Enable pseudo-oplocks + PseudoOplocks bool `json:"PseudoOplocks,omitempty"` + + // All opens will use non-cached IO + NonCacheIo bool `json:"NonCacheIo,omitempty"` + + // Enable pseudo directory change notifications + PseudoDirnotify bool `json:"PseudoDirnotify,omitempty"` + + // Block directory enumeration, renames, and deletes. + SingleFileMapping bool `json:"SingleFileMapping,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go new file mode 100644 index 00000000000..8e1836dd6be --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_memory.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VmMemory struct { + AvailableMemory int32 `json:"AvailableMemory,omitempty"` + + AvailableMemoryBuffer int32 `json:"AvailableMemoryBuffer,omitempty"` + + ReservedMemory uint64 `json:"ReservedMemory,omitempty"` + + AssignedMemory uint64 `json:"AssignedMemory,omitempty"` + + SlpActive bool `json:"SlpActive,omitempty"` + + BalancingEnabled bool `json:"BalancingEnabled,omitempty"` + + DmOperationInProgress bool `json:"DmOperationInProgress,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go new file mode 100644 index 00000000000..de1b9cf1ae2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/vm_processor_limits.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +// ProcessorLimits is used when modifying processor scheduling limits of a virtual machine. +type ProcessorLimits struct { + // Maximum amount of host CPU resources that the virtual machine can use. + Limit uint64 `json:"Limit,omitempty"` + // Value describing the relative priority of this virtual machine compared to other virtual machines. + Weight uint64 `json:"Weight,omitempty"` + // Minimum amount of host CPU resources that the virtual machine is guaranteed. + Reservation uint64 `json:"Reservation,omitempty"` + // Provides the target maximum CPU frequency, in MHz, for a virtual machine. 
+ MaximumFrequencyMHz uint32 `json:"MaximumFrequencyMHz,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go new file mode 100644 index 00000000000..8ed7e566d64 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type WindowsCrashReporting struct { + DumpFileName string `json:"DumpFileName,omitempty"` + + MaxDumpSize int64 `json:"MaxDumpSize,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go new file mode 100644 index 00000000000..a634dfc1515 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go @@ -0,0 +1,49 @@ +package hcs + +import ( + "context" + "encoding/json" + + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/vmcompute" +) + +// GetServiceProperties returns properties of the host compute service. +func GetServiceProperties(ctx context.Context, q hcsschema.PropertyQuery) (*hcsschema.ServiceProperties, error) { + operation := "hcs::GetServiceProperties" + + queryb, err := json.Marshal(q) + if err != nil { + return nil, err + } + propertiesJSON, resultJSON, err := vmcompute.HcsGetServiceProperties(ctx, string(queryb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, &HcsError{Op: operation, Err: err, Events: events} + } + + if propertiesJSON == "" { + return nil, ErrUnexpectedValue + } + properties := &hcsschema.ServiceProperties{} + if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { + return nil, err + } + return properties, nil +} + +// ModifyServiceSettings modifies settings of the host compute service. 
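// Illustrative sketch, with the request fields elided: ModificationRequest's
// exact fields depend on the v2 schema build being targeted (assumption), and
// the returned error carries the operation name plus any HCS events.
//
//	req := hcsschema.ModificationRequest{ /* PropertyType, Settings, ... */ }
//	if err := hcs.ModifyServiceSettings(ctx, req); err != nil {
//		return err
//	}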
+func ModifyServiceSettings(ctx context.Context, settings hcsschema.ModificationRequest) error { + operation := "hcs::ModifyServiceSettings" + + settingsJSON, err := json.Marshal(settings) + if err != nil { + return err + } + resultJSON, err := vmcompute.HcsModifyServiceSettings(ctx, string(settingsJSON)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return &HcsError{Op: operation, Err: err, Events: events} + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go new file mode 100644 index 00000000000..75499c967f0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -0,0 +1,637 @@ +package hcs + +import ( + "context" + "encoding/json" + "errors" + "strings" + "sync" + "syscall" + + "github.com/Microsoft/hcsshim/internal/cow" + "github.com/Microsoft/hcsshim/internal/hcs/schema1" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/timeout" + "github.com/Microsoft/hcsshim/internal/vmcompute" + "go.opencensus.io/trace" +) + +type System struct { + handleLock sync.RWMutex + handle vmcompute.HcsSystem + id string + callbackNumber uintptr + + closedWaitOnce sync.Once + waitBlock chan struct{} + waitError error + exitError error + os, typ string +} + +func newSystem(id string) *System { + return &System{ + id: id, + waitBlock: make(chan struct{}), + } +} + +// CreateComputeSystem creates a new compute system with the given configuration but does not start it. +func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) { + operation := "hcs::CreateComputeSystem" + + // hcsCreateComputeSystemContext is an async operation. Start the outer span + // here to measure the full create time. + ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", id)) + + computeSystem := newSystem(id) + + hcsDocumentB, err := json.Marshal(hcsDocumentInterface) + if err != nil { + return nil, err + } + + hcsDocument := string(hcsDocumentB) + + var ( + identity syscall.Handle + resultJSON string + createError error + ) + computeSystem.handle, resultJSON, createError = vmcompute.HcsCreateComputeSystem(ctx, id, hcsDocument, identity) + if createError == nil || IsPending(createError) { + defer func() { + if err != nil { + computeSystem.Close() + } + }() + if err = computeSystem.registerCallback(ctx); err != nil { + // Terminate the compute system if it still exists. We're okay to + // ignore a failure here. + _ = computeSystem.Terminate(ctx) + return nil, makeSystemError(computeSystem, operation, err, nil) + } + } + + events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) + if err != nil { + if err == ErrTimeout { + // Terminate the compute system if it still exists. We're okay to + // ignore a failure here. + _ = computeSystem.Terminate(ctx) + } + return nil, makeSystemError(computeSystem, operation, err, events) + } + go computeSystem.waitBackground() + if err = computeSystem.getCachedProperties(ctx); err != nil { + return nil, err + } + return computeSystem, nil +} + +// OpenComputeSystem opens an existing compute system by ID. 
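// Illustrative sketch (not part of the vendored file), assuming the caller
// imports this internal package as hcs; the ID string is a placeholder.
//
//	sys, err := hcs.OpenComputeSystem(ctx, "example-container")
//	if err != nil {
//		return err
//	}
//	defer sys.Close() // releases the handle only; the system keeps running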
+func OpenComputeSystem(ctx context.Context, id string) (*System, error) { + operation := "hcs::OpenComputeSystem" + + computeSystem := newSystem(id) + handle, resultJSON, err := vmcompute.HcsOpenComputeSystem(ctx, id) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + computeSystem.handle = handle + defer func() { + if err != nil { + computeSystem.Close() + } + }() + if err = computeSystem.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + go computeSystem.waitBackground() + if err = computeSystem.getCachedProperties(ctx); err != nil { + return nil, err + } + return computeSystem, nil +} + +func (computeSystem *System) getCachedProperties(ctx context.Context) error { + props, err := computeSystem.Properties(ctx) + if err != nil { + return err + } + computeSystem.typ = strings.ToLower(props.SystemType) + computeSystem.os = strings.ToLower(props.RuntimeOSType) + if computeSystem.os == "" && computeSystem.typ == "container" { + // Pre-RS5 HCS did not return the OS, but it only supported containers + // that ran Windows. + computeSystem.os = "windows" + } + return nil +} + +// OS returns the operating system of the compute system, "linux" or "windows". +func (computeSystem *System) OS() string { + return computeSystem.os +} + +// IsOCI returns whether processes in the compute system should be created via +// OCI. +func (computeSystem *System) IsOCI() bool { + return computeSystem.os == "linux" && computeSystem.typ == "container" +} + +// GetComputeSystems gets a list of the compute systems on the system that match the query +func GetComputeSystems(ctx context.Context, q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) { + operation := "hcs::GetComputeSystems" + + queryb, err := json.Marshal(q) + if err != nil { + return nil, err + } + + computeSystemsJSON, resultJSON, err := vmcompute.HcsEnumerateComputeSystems(ctx, string(queryb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, &HcsError{Op: operation, Err: err, Events: events} + } + + if computeSystemsJSON == "" { + return nil, ErrUnexpectedValue + } + computeSystems := []schema1.ContainerProperties{} + if err = json.Unmarshal([]byte(computeSystemsJSON), &computeSystems); err != nil { + return nil, err + } + + return computeSystems, nil +} + +// Start synchronously starts the computeSystem. +func (computeSystem *System) Start(ctx context.Context) (err error) { + operation := "hcs::System::Start" + + // hcsStartComputeSystemContext is an async operation. Start the outer span + // here to measure the full start time. + ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "") + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} + +// ID returns the compute system's identifier. 
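// Illustrative sketch tying together GetComputeSystems, OpenComputeSystem and
// the accessors defined here; the empty query matches everything, and the ID
// field name on schema1.ContainerProperties is assumed.
//
//	all, err := hcs.GetComputeSystems(ctx, schema1.ComputeSystemQuery{})
//	if err != nil {
//		return err
//	}
//	for _, props := range all {
//		sys, err := hcs.OpenComputeSystem(ctx, props.ID)
//		if err != nil {
//			continue
//		}
//		fmt.Println(sys.ID(), sys.OS())
//		sys.Close()
//	}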
+func (computeSystem *System) ID() string { + return computeSystem.id +} + +// Shutdown requests a compute system shutdown. +func (computeSystem *System) Shutdown(ctx context.Context) error { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Shutdown" + + if computeSystem.handle == 0 { + return nil + } + + resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "") + events := processHcsResult(ctx, resultJSON) + switch err { + case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: + default: + return makeSystemError(computeSystem, operation, err, events) + } + return nil +} + +// Terminate requests a compute system terminate. +func (computeSystem *System) Terminate(ctx context.Context) error { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Terminate" + + if computeSystem.handle == 0 { + return nil + } + + resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "") + events := processHcsResult(ctx, resultJSON) + switch err { + case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: + default: + return makeSystemError(computeSystem, operation, err, events) + } + return nil +} + +// waitBackground waits for the compute system exit notification. Once received +// sets `computeSystem.waitError` (if any) and unblocks all `Wait` calls. +// +// This MUST be called exactly once per `computeSystem.handle` but `Wait` is +// safe to call multiple times. +func (computeSystem *System) waitBackground() { + operation := "hcs::System::waitBackground" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil) + switch err { + case nil: + log.G(ctx).Debug("system exited") + case ErrVmcomputeUnexpectedExit: + log.G(ctx).Debug("unexpected system exit") + computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil) + err = nil + default: + err = makeSystemError(computeSystem, operation, err, nil) + } + computeSystem.closedWaitOnce.Do(func() { + computeSystem.waitError = err + close(computeSystem.waitBlock) + }) + oc.SetSpanStatus(span, err) +} + +// Wait synchronously waits for the compute system to shutdown or terminate. If +// the compute system has already exited returns the previous error (if any). +func (computeSystem *System) Wait() error { + <-computeSystem.waitBlock + return computeSystem.waitError +} + +// ExitError returns an error describing the reason the compute system terminated. +func (computeSystem *System) ExitError() error { + select { + case <-computeSystem.waitBlock: + if computeSystem.waitError != nil { + return computeSystem.waitError + } + return computeSystem.exitError + default: + return errors.New("container not exited") + } +} + +// Properties returns the requested container properties targeting a V1 schema container. 
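// Illustrative sketch: calling Properties with no types marshals an empty
// PropertyTypes list, which HCS treats as the default property set
// (assumption). SystemType and RuntimeOSType are the same fields consumed by
// getCachedProperties above; PropertiesV2 follows the same shape for the v2
// schema.
//
//	props, err := sys.Properties(ctx)
//	if err != nil {
//		return err
//	}
//	fmt.Println(props.SystemType, props.RuntimeOSType)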
+func (computeSystem *System) Properties(ctx context.Context, types ...schema1.PropertyType) (*schema1.ContainerProperties, error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Properties" + + queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types}) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + + propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + + if propertiesJSON == "" { + return nil, ErrUnexpectedValue + } + properties := &schema1.ContainerProperties{} + if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + + return properties, nil +} + +// PropertiesV2 returns the requested container properties targeting a V2 schema container. +func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::PropertiesV2" + + queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types}) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + + propertiesJSON, resultJSON, err := vmcompute.HcsGetComputeSystemProperties(ctx, computeSystem.handle, string(queryBytes)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + + if propertiesJSON == "" { + return nil, ErrUnexpectedValue + } + properties := &hcsschema.Properties{} + if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + + return properties, nil +} + +// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5. +func (computeSystem *System) Pause(ctx context.Context) (err error) { + operation := "hcs::System::Pause" + + // hcsPauseComputeSystemContext is an async operation. Start the outer span + // here to measure the full pause time. + ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "") + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} + +// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5. +func (computeSystem *System) Resume(ctx context.Context) (err error) { + operation := "hcs::System::Resume" + + // hcsResumeComputeSystemContext is an async operation. Start the outer span + // here to measure the full restore time.
+ ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "") + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} + +// Save saves the compute system. +func (computeSystem *System) Save(ctx context.Context, options interface{}) (err error) { + operation := "hcs::System::Save" + + // hcsSaveComputeSystemContext is an async operation. Start the outer span + // here to measure the full save time. + ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + saveOptions, err := json.Marshal(options) + if err != nil { + return err + } + + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + result, err := vmcompute.HcsSaveComputeSystem(ctx, computeSystem.handle, string(saveOptions)) + events, err := processAsyncHcsResult(ctx, err, result, computeSystem.callbackNumber, hcsNotificationSystemSaveCompleted, &timeout.SystemSave) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} + +func (computeSystem *System) createProcess(ctx context.Context, operation string, c interface{}) (*Process, *vmcompute.HcsProcessInformation, error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + if computeSystem.handle == 0 { + return nil, nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + configurationb, err := json.Marshal(c) + if err != nil { + return nil, nil, makeSystemError(computeSystem, operation, err, nil) + } + + configuration := string(configurationb) + processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, nil, makeSystemError(computeSystem, operation, err, events) + } + + log.G(ctx).WithField("pid", processInfo.ProcessId).Debug("created process pid") + return newProcess(processHandle, int(processInfo.ProcessId), computeSystem), &processInfo, nil +} + +// CreateProcess launches a new process within the computeSystem.
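// Illustrative sketch: the config parameter is only ever JSON-marshaled, so a
// map stands in here for a real v1 ProcessConfig or OCI process spec; the
// field names follow the v1 schema (assumed) and the command is arbitrary.
//
//	cfg := map[string]interface{}{
//		"CommandLine":      `cmd /c echo hello`,
//		"CreateStdOutPipe": true,
//	}
//	p, err := sys.CreateProcess(ctx, cfg)
//	if err != nil {
//		return err
//	}
//	defer p.Close()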
+func (computeSystem *System) CreateProcess(ctx context.Context, c interface{}) (cow.Process, error) { + operation := "hcs::System::CreateProcess" + process, processInfo, err := computeSystem.createProcess(ctx, operation, c) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + process.Close() + } + }() + + pipes, err := makeOpenFiles([]syscall.Handle{processInfo.StdInput, processInfo.StdOutput, processInfo.StdError}) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + process.stdin = pipes[0] + process.stdout = pipes[1] + process.stderr = pipes[2] + process.hasCachedStdio = true + + if err = process.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + go process.waitBackground() + + return process, nil +} + +// OpenProcess gets an interface to an existing process within the computeSystem. +func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process, error) { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::OpenProcess" + + if computeSystem.handle == 0 { + return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + processHandle, resultJSON, err := vmcompute.HcsOpenProcess(ctx, computeSystem.handle, uint32(pid)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return nil, makeSystemError(computeSystem, operation, err, events) + } + + process := newProcess(processHandle, pid, computeSystem) + if err = process.registerCallback(ctx); err != nil { + return nil, makeSystemError(computeSystem, operation, err, nil) + } + go process.waitBackground() + + return process, nil +} + +// Close cleans up any state associated with the compute system but does not terminate or wait for it. 
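// Illustrative teardown order, per the Close semantics documented above:
// stop the system first, then release the handle.
//
//	if err := sys.Shutdown(ctx); err != nil {
//		_ = sys.Terminate(ctx) // hard-stop fallback
//	}
//	_ = sys.Wait()
//	_ = sys.Close() // Close alone would leave the system running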
+func (computeSystem *System) Close() (err error) { + operation := "hcs::System::Close" + ctx, span := trace.StartSpan(context.Background(), operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) + + computeSystem.handleLock.Lock() + defer computeSystem.handleLock.Unlock() + + // Don't double free this + if computeSystem.handle == 0 { + return nil + } + + if err = computeSystem.unregisterCallback(ctx); err != nil { + return makeSystemError(computeSystem, operation, err, nil) + } + + err = vmcompute.HcsCloseComputeSystem(ctx, computeSystem.handle) + if err != nil { + return makeSystemError(computeSystem, operation, err, nil) + } + + computeSystem.handle = 0 + computeSystem.closedWaitOnce.Do(func() { + computeSystem.waitError = ErrAlreadyClosed + close(computeSystem.waitBlock) + }) + + return nil +} + +func (computeSystem *System) registerCallback(ctx context.Context) error { + callbackContext := &notificationWatcherContext{ + channels: newSystemChannels(), + systemID: computeSystem.id, + } + + callbackMapLock.Lock() + callbackNumber := nextCallback + nextCallback++ + callbackMap[callbackNumber] = callbackContext + callbackMapLock.Unlock() + + callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, notificationWatcherCallback, callbackNumber) + if err != nil { + return err + } + callbackContext.handle = callbackHandle + computeSystem.callbackNumber = callbackNumber + + return nil +} + +func (computeSystem *System) unregisterCallback(ctx context.Context) error { + callbackNumber := computeSystem.callbackNumber + + callbackMapLock.RLock() + callbackContext := callbackMap[callbackNumber] + callbackMapLock.RUnlock() + + if callbackContext == nil { + return nil + } + + handle := callbackContext.handle + + if handle == 0 { + return nil + } + + // hcsUnregisterComputeSystemCallback has its own synchronization + // to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
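	// NOTE (added for clarity): holding callbackMapLock across the call below
	// could deadlock, since the unregister blocks until in-flight callbacks
	// drain and the notification callback is expected to take the same lock to
	// resolve its context (inference; the handler lives in callback.go, not
	// shown here).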
+ err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle) + if err != nil { + return err + } + + closeChannels(callbackContext.channels) + + callbackMapLock.Lock() + delete(callbackMap, callbackNumber) + callbackMapLock.Unlock() + + handle = 0 //nolint:ineffassign + + return nil +} + +// Modify the System by sending a request to HCS +func (computeSystem *System) Modify(ctx context.Context, config interface{}) error { + computeSystem.handleLock.RLock() + defer computeSystem.handleLock.RUnlock() + + operation := "hcs::System::Modify" + + if computeSystem.handle == 0 { + return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + + requestBytes, err := json.Marshal(config) + if err != nil { + return err + } + + requestJSON := string(requestBytes) + resultJSON, err := vmcompute.HcsModifyComputeSystem(ctx, computeSystem.handle, requestJSON) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return makeSystemError(computeSystem, operation, err, events) + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go new file mode 100644 index 00000000000..3342e5bb948 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go @@ -0,0 +1,62 @@ +package hcs + +import ( + "context" + "io" + "syscall" + + "github.com/Microsoft/go-winio" + diskutil "github.com/Microsoft/go-winio/vhd" + "github.com/Microsoft/hcsshim/computestorage" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles +// if there is an error. +func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) { + fs := make([]io.ReadWriteCloser, len(hs)) + for i, h := range hs { + if h != syscall.Handle(0) { + if err == nil { + fs[i], err = winio.MakeOpenFile(h) + } + if err != nil { + syscall.Close(h) + } + } + } + if err != nil { + for _, f := range fs { + if f != nil { + f.Close() + } + } + return nil, err + } + return fs, nil +} + +// CreateNTFSVHD creates a VHD formatted with NTFS of size `sizeGB` at the given `vhdPath`. 
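// Illustrative usage; path and size are arbitrary:
//
//	if err := hcs.CreateNTFSVHD(ctx, `C:\scratch\layer.vhdx`, 20); err != nil {
//		return err
//	}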
+func CreateNTFSVHD(ctx context.Context, vhdPath string, sizeGB uint32) (err error) { + if err := diskutil.CreateVhdx(vhdPath, sizeGB, 1); err != nil { + return errors.Wrap(err, "failed to create VHD") + } + + vhd, err := diskutil.OpenVirtualDisk(vhdPath, diskutil.VirtualDiskAccessNone, diskutil.OpenVirtualDiskFlagNone) + if err != nil { + return errors.Wrap(err, "failed to open VHD") + } + defer func() { + err2 := windows.CloseHandle(windows.Handle(vhd)) + if err == nil { + err = errors.Wrap(err2, "failed to close VHD") + } + }() + + if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(vhd)); err != nil { + return errors.Wrap(err, "failed to format VHD") + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go new file mode 100644 index 00000000000..db4e14fdfb2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go @@ -0,0 +1,68 @@ +package hcs + +import ( + "context" + "time" + + "github.com/Microsoft/hcsshim/internal/log" +) + +func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) { + events := processHcsResult(ctx, resultJSON) + if IsPending(err) { + return nil, waitForNotification(ctx, callbackNumber, expectedNotification, timeout) + } + + return events, err +} + +func waitForNotification(ctx context.Context, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error { + callbackMapLock.RLock() + if _, ok := callbackMap[callbackNumber]; !ok { + callbackMapLock.RUnlock() + log.G(ctx).WithField("callbackNumber", callbackNumber).Error("failed to waitForNotification: callbackNumber does not exist in callbackMap") + return ErrHandleClose + } + channels := callbackMap[callbackNumber].channels + callbackMapLock.RUnlock() + + expectedChannel := channels[expectedNotification] + if expectedChannel == nil { + log.G(ctx).WithField("type", expectedNotification).Error("unknown notification type in waitForNotification") + return ErrInvalidNotificationType + } + + var c <-chan time.Time + if timeout != nil { + timer := time.NewTimer(*timeout) + c = timer.C + defer timer.Stop() + } + + select { + case err, ok := <-expectedChannel: + if !ok { + return ErrHandleClose + } + return err + case err, ok := <-channels[hcsNotificationSystemExited]: + if !ok { + return ErrHandleClose + } + // If the expected notification is hcsNotificationSystemExited, it is random + // which of the two select cases is chosen. 
Return the raw error if hcsNotificationSystemExited is expected + if channels[hcsNotificationSystemExited] == expectedChannel { + return err + } + return ErrUnexpectedContainerExit + case _, ok := <-channels[hcsNotificationServiceDisconnect]: + if !ok { + return ErrHandleClose + } + // hcsNotificationServiceDisconnect should never be an expected notification + // it does not need the same handling as hcsNotificationSystemExited + return ErrUnexpectedProcessAbort + case <-c: + return ErrTimeout + } +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go new file mode 100644 index 00000000000..921c2c8556c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go @@ -0,0 +1,47 @@ +package hcserror + +import ( + "fmt" + "syscall" +) + +const ERROR_GEN_FAILURE = syscall.Errno(31) + +type HcsError struct { + title string + rest string + Err error +} + +func (e *HcsError) Error() string { + s := e.title + if len(s) > 0 && s[len(s)-1] != ' ' { + s += " " + } + s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, Win32FromError(e.Err)) + if e.rest != "" { + if e.rest[0] != ' ' { + s += " " + } + s += e.rest + } + return s +} + +func New(err error, title, rest string) error { + // Pass through DLL errors directly since they do not originate from HCS. + if _, ok := err.(*syscall.DLLError); ok { + return err + } + return &HcsError{title, rest, err} +} + +func Win32FromError(err error) uint32 { + if herr, ok := err.(*HcsError); ok { + return Win32FromError(herr.Err) + } + if code, ok := err.(syscall.Errno); ok { + return uint32(code) + } + return uint32(ERROR_GEN_FAILURE) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go new file mode 100644 index 00000000000..b2e475f53c6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go @@ -0,0 +1,23 @@ +package hns + +import "fmt" + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go hns.go + +//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? 
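// The //sys line above is input for the go:generate run of mksyscall_windows.go,
// which emits zsyscall_windows.go binding _hnsCall to the HNSCall export of
// vmcompute.dll. The error types that follow are plain value types, so callers
// can test for them with a type assertion; a hedged sketch (endpoint name is a
// placeholder):
//
//	ep, err := GetHNSEndpointByName("example-endpoint")
//	if _, ok := err.(EndpointNotFoundError); ok {
//		// endpoint does not exist yet
//	}
//	_ = ep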
+ +type EndpointNotFoundError struct { + EndpointName string +} + +func (e EndpointNotFoundError) Error() string { + return fmt.Sprintf("Endpoint %s not found", e.EndpointName) +} + +type NetworkNotFoundError struct { + NetworkName string +} + +func (e NetworkNotFoundError) Error() string { + return fmt.Sprintf("Network %s not found", e.NetworkName) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go new file mode 100644 index 00000000000..7cf954c7b26 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go @@ -0,0 +1,338 @@ +package hns + +import ( + "encoding/json" + "net" + "strings" + + "github.com/sirupsen/logrus" +) + +// HNSEndpoint represents a network endpoint in HNS +type HNSEndpoint struct { + Id string `json:"ID,omitempty"` + Name string `json:",omitempty"` + VirtualNetwork string `json:",omitempty"` + VirtualNetworkName string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` + MacAddress string `json:",omitempty"` + IPAddress net.IP `json:",omitempty"` + IPv6Address net.IP `json:",omitempty"` + DNSSuffix string `json:",omitempty"` + DNSServerList string `json:",omitempty"` + DNSDomain string `json:",omitempty"` + GatewayAddress string `json:",omitempty"` + GatewayAddressV6 string `json:",omitempty"` + EnableInternalDNS bool `json:",omitempty"` + DisableICC bool `json:",omitempty"` + PrefixLength uint8 `json:",omitempty"` + IPv6PrefixLength uint8 `json:",omitempty"` + IsRemoteEndpoint bool `json:",omitempty"` + EnableLowMetric bool `json:",omitempty"` + Namespace *Namespace `json:",omitempty"` + EncapOverhead uint16 `json:",omitempty"` + SharedContainers []string `json:",omitempty"` +} + +// SystemType represents the type of the system on which actions are done +type SystemType string + +// SystemType const +const ( + ContainerType SystemType = "Container" + VirtualMachineType SystemType = "VirtualMachine" + HostType SystemType = "Host" +) + +// EndpointAttachDetachRequest is the structure used to send a request to the container to modify the system +// Supported resource types are Network and Request Types are Add/Remove +type EndpointAttachDetachRequest struct { + ContainerID string `json:"ContainerId,omitempty"` + SystemType SystemType `json:"SystemType"` + CompartmentID uint16 `json:"CompartmentId,omitempty"` + VirtualNICName string `json:"VirtualNicName,omitempty"` +} + +// EndpointResquestResponse is the object used to get the endpoint request response +type EndpointResquestResponse struct { + Success bool + Error string +} + +// EndpointStats is the object that has stats for a given endpoint +type EndpointStats struct { + BytesReceived uint64 `json:"BytesReceived"` + BytesSent uint64 `json:"BytesSent"` + DroppedPacketsIncoming uint64 `json:"DroppedPacketsIncoming"` + DroppedPacketsOutgoing uint64 `json:"DroppedPacketsOutgoing"` + EndpointID string `json:"EndpointId"` + InstanceID string `json:"InstanceId"` + PacketsReceived uint64 `json:"PacketsReceived"` + PacketsSent uint64 `json:"PacketsSent"` +} + +// HNSEndpointRequest makes a HNS call to modify/query a network endpoint +func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { + endpoint := &HNSEndpoint{} + err := hnsCall(method, "/endpoints/"+path, request, &endpoint) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +// HNSListEndpointRequest makes a HNS call to query the list of available endpoints +func HNSListEndpointRequest() ([]HNSEndpoint,
error) { + var endpoint []HNSEndpoint + err := hnsCall("GET", "/endpoints/", "", &endpoint) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +// hnsEndpointStatsRequest makes a HNS call to query the stats for a given endpoint ID +func hnsEndpointStatsRequest(id string) (*EndpointStats, error) { + var stats EndpointStats + err := hnsCall("GET", "/endpointstats/"+id, "", &stats) + if err != nil { + return nil, err + } + + return &stats, nil +} + +// GetHNSEndpointByID gets the Endpoint by ID +func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { + return HNSEndpointRequest("GET", endpointID, "") +} + +// GetHNSEndpointStats gets the stats for an Endpoint by ID +func GetHNSEndpointStats(endpointID string) (*EndpointStats, error) { + return hnsEndpointStatsRequest(endpointID) +} + +// GetHNSEndpointByName gets the endpoint filtered by Name +func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { + hnsResponse, err := HNSListEndpointRequest() + if err != nil { + return nil, err + } + for _, hnsEndpoint := range hnsResponse { + if hnsEndpoint.Name == endpointName { + return &hnsEndpoint, nil + } + } + return nil, EndpointNotFoundError{EndpointName: endpointName} +} + +type endpointAttachInfo struct { + SharedContainers json.RawMessage `json:",omitempty"` +} + +func (endpoint *HNSEndpoint) IsAttached(vID string) (bool, error) { + attachInfo := endpointAttachInfo{} + err := hnsCall("GET", "/endpoints/"+endpoint.Id, "", &attachInfo) + + // Returning false allows us to just return the err + if err != nil { + return false, err + } + + if strings.Contains(strings.ToLower(string(attachInfo.SharedContainers)), strings.ToLower(vID)) { + return true, nil + } + + return false, nil + +} + +// Create Endpoint by sending EndpointRequest to HNS. 
TODO: Create a separate HNS interface to place all these methods +func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) { + operation := "Create" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + jsonString, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + return HNSEndpointRequest("POST", "", string(jsonString)) +} + +// Delete Endpoint by sending EndpointRequest to HNS +func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) { + operation := "Delete" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + return HNSEndpointRequest("DELETE", endpoint.Id, "") +} + +// Update Endpoint +func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) { + operation := "Update" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + jsonString, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint) + + return endpoint, err +} + +// ApplyACLPolicy applies a set of ACL Policies on the Endpoint +func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error { + operation := "ApplyACLPolicy" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + for _, policy := range policies { + if policy == nil { + continue + } + jsonString, err := json.Marshal(policy) + if err != nil { + return err + } + endpoint.Policies = append(endpoint.Policies, jsonString) + } + + _, err := endpoint.Update() + return err +} + +// ApplyProxyPolicy applies a set of Proxy Policies on the Endpoint +func (endpoint *HNSEndpoint) ApplyProxyPolicy(policies ...*ProxyPolicy) error { + operation := "ApplyProxyPolicy" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + for _, policy := range policies { + if policy == nil { + continue + } + jsonString, err := json.Marshal(policy) + if err != nil { + return err + } + endpoint.Policies = append(endpoint.Policies, jsonString) + } + + _, err := endpoint.Update() + return err +} + +// ContainerAttach attaches an endpoint to container +func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error { + operation := "ContainerAttach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + ContainerID: containerID, + CompartmentID: compartmentID, + SystemType: ContainerType, + } + response := &EndpointResquestResponse{} + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) +} + +// ContainerDetach detaches an endpoint from container +func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error { + operation := "ContainerDetach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + ContainerID: containerID, + SystemType: ContainerType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} + +// HostAttach attaches a nic on the host +func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error { + operation := "HostAttach" + title 
:= "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + CompartmentID: compartmentID, + SystemType: HostType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) + +} + +// HostDetach detaches a nic on the host +func (endpoint *HNSEndpoint) HostDetach() error { + operation := "HostDetach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + SystemType: HostType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} + +// VirtualMachineNICAttach attaches an endpoint to a virtual machine +func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error { + operation := "VirtualMachineNicAttach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + VirtualNICName: virtualMachineNICName, + SystemType: VirtualMachineType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) +} + +// VirtualMachineNICDetach detaches an endpoint from a virtual machine +func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error { + operation := "VirtualMachineNicDetach" + title := "hcsshim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + SystemType: VirtualMachineType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go new file mode 100644 index 00000000000..2df4a57f56c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go @@ -0,0 +1,49 @@ +package hns + +import ( + "encoding/json" + "fmt" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) { + var responseBuffer *uint16 + logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request) + + err := _hnsCall(method, path, request, &responseBuffer) + if err != nil { + return nil, hcserror.New(err, "hnsCall ", "") + } + response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) + + hnsresponse := &hnsResponse{} + if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { + return nil, err + } + return hnsresponse, nil +} + +func hnsCall(method, path, request string, returnResponse interface{}) error { + hnsresponse, err := hnsCallRawResponse(method, path, request) + if err != nil { + return fmt.Errorf("failed during hnsCallRawResponse: %v", err) + } + if !hnsresponse.Success { + return fmt.Errorf("hns failed with error : %s", hnsresponse.Error) + } + + if len(hnsresponse.Output) == 0
{ + return nil + } + + logrus.Debugf("Network Response : %s", hnsresponse.Output) + err = json.Unmarshal(hnsresponse.Output, returnResponse) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go new file mode 100644 index 00000000000..a8d8cc56aea --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go @@ -0,0 +1,28 @@ +package hns + +type HNSGlobals struct { + Version HNSVersion `json:"Version"` +} + +type HNSVersion struct { + Major int `json:"Major"` + Minor int `json:"Minor"` +} + +var ( + HNSVersion1803 = HNSVersion{Major: 7, Minor: 2} +) + +func GetHNSGlobals() (*HNSGlobals, error) { + var version HNSVersion + err := hnsCall("GET", "/globals/version", "", &version) + if err != nil { + return nil, err + } + + globals := &HNSGlobals{ + Version: version, + } + + return globals, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go new file mode 100644 index 00000000000..f12d3ab0411 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go @@ -0,0 +1,141 @@ +package hns + +import ( + "encoding/json" + "errors" + "github.com/sirupsen/logrus" + "net" +) + +// Subnet is associated with a network and represents a list +// of subnets available to the network +type Subnet struct { + AddressPrefix string `json:",omitempty"` + GatewayAddress string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` +} + +// MacPool is associated with a network and represents a list +// of MAC addresses available to the network +type MacPool struct { + StartMacAddress string `json:",omitempty"` + EndMacAddress string `json:",omitempty"` +} + +// HNSNetwork represents a network in HNS +type HNSNetwork struct { + Id string `json:"ID,omitempty"` + Name string `json:",omitempty"` + Type string `json:",omitempty"` + NetworkAdapterName string `json:",omitempty"` + SourceMac string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` + MacPools []MacPool `json:",omitempty"` + Subnets []Subnet `json:",omitempty"` + DNSSuffix string `json:",omitempty"` + DNSServerList string `json:",omitempty"` + DNSServerCompartment uint32 `json:",omitempty"` + ManagementIP string `json:",omitempty"` + AutomaticDNS bool `json:",omitempty"` +} + +type hnsResponse struct { + Success bool + Error string + Output json.RawMessage +} + +// HNSNetworkRequest makes a call into HNS to update/query a single network +func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) { + var network HNSNetwork + err := hnsCall(method, "/networks/"+path, request, &network) + if err != nil { + return nil, err + } + + return &network, nil +} + +// HNSListNetworkRequest makes a HNS call to query the list of available networks +func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) { + var network []HNSNetwork + err := hnsCall(method, "/networks/"+path, request, &network) + if err != nil { + return nil, err + } + + return network, nil +} + +// GetHNSNetworkByID gets the network by ID +func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) { + return HNSNetworkRequest("GET", networkID, "") +} + +// GetHNSNetworkByName gets the network filtered by Name +func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) { + hsnnetworks, err := HNSListNetworkRequest("GET", "", "") + if err != nil { + return nil, err + } + for _, hnsnetwork := range
hnsnetworks { + if hnsnetwork.Name == networkName { + return &hnsnetwork, nil + } + } + return nil, NetworkNotFoundError{NetworkName: networkName} +} + +// Create creates the network by sending a NetworkRequest to HNS. +func (network *HNSNetwork) Create() (*HNSNetwork, error) { + operation := "Create" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + + for _, subnet := range network.Subnets { + if (subnet.AddressPrefix != "") && (subnet.GatewayAddress == "") { + return nil, errors.New("network create error, subnet has address prefix but no gateway specified") + } + } + + jsonString, err := json.Marshal(network) + if err != nil { + return nil, err + } + return HNSNetworkRequest("POST", "", string(jsonString)) +} + +// Delete deletes the network by sending a NetworkRequest to HNS +func (network *HNSNetwork) Delete() (*HNSNetwork, error) { + operation := "Delete" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + + return HNSNetworkRequest("DELETE", network.Id, "") +} + +// NewEndpoint creates an endpoint on the network. +func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint { + return &HNSEndpoint{ + VirtualNetwork: network.Id, + IPAddress: ipAddress, + MacAddress: string(macAddress), + } +} + +func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { + operation := "CreateEndpoint" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id) + + endpoint.VirtualNetwork = network.Id + return endpoint.Create() +} + +func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { + operation := "CreateRemoteEndpoint" + title := "hcsshim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + endpoint.IsRemoteEndpoint = true + return network.CreateEndpoint(endpoint) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go new file mode 100644 index 00000000000..591a2631e45 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go @@ -0,0 +1,109 @@ +package hns + +// PolicyType is the type of policy request supported in ModifySystem +type PolicyType string + +// PolicyType constants +const ( + Nat PolicyType = "NAT" + ACL PolicyType = "ACL" + PA PolicyType = "PA" + VLAN PolicyType = "VLAN" + VSID PolicyType = "VSID" + VNet PolicyType = "VNET" + L2Driver PolicyType = "L2Driver" + Isolation PolicyType = "Isolation" + QOS PolicyType = "QOS" + OutboundNat PolicyType = "OutBoundNAT" + ExternalLoadBalancer PolicyType = "ELB" + Route PolicyType = "ROUTE" + Proxy PolicyType = "PROXY" +) + +type NatPolicy struct { + Type PolicyType `json:"Type"` + Protocol string `json:",omitempty"` + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` +} + +type QosPolicy struct { + Type PolicyType `json:"Type"` + MaximumOutgoingBandwidthInBytes uint64 +} + +type IsolationPolicy struct { + Type PolicyType `json:"Type"` + VLAN uint + VSID uint + InDefaultIsolation bool +} + +type VlanPolicy struct { + Type PolicyType `json:"Type"` + VLAN uint +} + +type VsidPolicy struct { + Type PolicyType `json:"Type"` + VSID uint +} + +type PaPolicy struct { + Type PolicyType `json:"Type"` + PA string `json:"PA"` +} + +type OutboundNatPolicy struct { + Policy + VIP string `json:"VIP,omitempty"` + Exceptions []string `json:"ExceptionList,omitempty"` + Destinations []string `json:",omitempty"` +} + +type 
ProxyPolicy struct { + Type PolicyType `json:"Type"` + IP string `json:",omitempty"` + Port string `json:",omitempty"` + ExceptionList []string `json:",omitempty"` + Destination string `json:",omitempty"` + OutboundNat bool `json:",omitempty"` +} + +type ActionType string +type DirectionType string +type RuleType string + +const ( + Allow ActionType = "Allow" + Block ActionType = "Block" + + In DirectionType = "In" + Out DirectionType = "Out" + + Host RuleType = "Host" + Switch RuleType = "Switch" +) + +type ACLPolicy struct { + Type PolicyType `json:"Type"` + Id string `json:"Id,omitempty"` + Protocol uint16 `json:",omitempty"` + Protocols string `json:"Protocols,omitempty"` + InternalPort uint16 `json:",omitempty"` + Action ActionType + Direction DirectionType + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:"LocalPorts,omitempty"` + LocalPort uint16 `json:",omitempty"` + RemotePorts string `json:"RemotePorts,omitempty"` + RemotePort uint16 `json:",omitempty"` + RuleType RuleType `json:"RuleType,omitempty"` + Priority uint16 `json:",omitempty"` + ServiceName string `json:",omitempty"` +} + +type Policy struct { + Type PolicyType `json:"Type"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go new file mode 100644 index 00000000000..31322a68167 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go @@ -0,0 +1,201 @@ +package hns + +import ( + "encoding/json" + + "github.com/sirupsen/logrus" +) + +// RoutePolicy is a structure defining schema for Route based Policy +type RoutePolicy struct { + Policy + DestinationPrefix string `json:"DestinationPrefix,omitempty"` + NextHop string `json:"NextHop,omitempty"` + EncapEnabled bool `json:"NeedEncap,omitempty"` +} + +// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy +type ELBPolicy struct { + LBPolicy + SourceVIP string `json:"SourceVIP,omitempty"` + VIPs []string `json:"VIPs,omitempty"` + ILB bool `json:"ILB,omitempty"` + DSR bool `json:"IsDSR,omitempty"` +} + +// LBPolicy is a structure defining schema for LoadBalancing based Policy +type LBPolicy struct { + Policy + Protocol uint16 `json:"Protocol,omitempty"` + InternalPort uint16 + ExternalPort uint16 +} + +// PolicyList is a structure defining schema for Policy list request +type PolicyList struct { + ID string `json:"ID,omitempty"` + EndpointReferences []string `json:"References,omitempty"` + Policies []json.RawMessage `json:"Policies,omitempty"` +} + +// HNSPolicyListRequest makes a call into HNS to update/query a single policy list +func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) { + var policy PolicyList + err := hnsCall(method, "/policylists/"+path, request, &policy) + if err != nil { + return nil, err + } + + return &policy, nil +} + +// HNSListPolicyListRequest gets all the policy lists +func HNSListPolicyListRequest() ([]PolicyList, error) { + var plist []PolicyList + err := hnsCall("GET", "/policylists/", "", &plist) + if err != nil { + return nil, err + } + + return plist, nil +} + +// PolicyListRequest makes an HNS call to modify/query a network policy list +func PolicyListRequest(method, path, request string) (*PolicyList, error) { + policylist := &PolicyList{} + err := hnsCall(method, "/policylists/"+path, request, &policylist) + if err != nil { + return nil, err + } + + return policylist, nil +} + +// GetPolicyListByID gets the policy
list by ID +func GetPolicyListByID(policyListID string) (*PolicyList, error) { + return PolicyListRequest("GET", policyListID, "") +} + +// Create creates a PolicyList by sending a PolicyListRequest to HNS. +func (policylist *PolicyList) Create() (*PolicyList, error) { + operation := "Create" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" id=%s", policylist.ID) + jsonString, err := json.Marshal(policylist) + if err != nil { + return nil, err + } + return PolicyListRequest("POST", "", string(jsonString)) +} + +// Delete deletes PolicyList +func (policylist *PolicyList) Delete() (*PolicyList, error) { + operation := "Delete" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" id=%s", policylist.ID) + + return PolicyListRequest("DELETE", policylist.ID, "") +} + +// AddEndpoint adds an endpoint to the Policy List +func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { + operation := "AddEndpoint" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) + + _, err := policylist.Delete() + if err != nil { + return nil, err + } + + // Add Endpoint to the Existing List + policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) + + return policylist.Create() +} + +// RemoveEndpoint removes an endpoint from the Policy List +func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { + operation := "RemoveEndpoint" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) + + _, err := policylist.Delete() + if err != nil { + return nil, err + } + + elementToRemove := "/endpoints/" + endpoint.Id + + var references []string + + for _, endpointReference := range policylist.EndpointReferences { + if endpointReference == elementToRemove { + continue + } + references = append(references, endpointReference) + } + policylist.EndpointReferences = references + return policylist.Create() +} + +// AddLoadBalancer adds a load balancer policy list for the specified endpoints +func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { + operation := "AddLoadBalancer" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort) + + policylist := &PolicyList{} + + elbPolicy := &ELBPolicy{ + SourceVIP: sourceVIP, + ILB: isILB, + } + + if len(vip) > 0 { + elbPolicy.VIPs = []string{vip} + } + elbPolicy.Type = ExternalLoadBalancer + elbPolicy.Protocol = protocol + elbPolicy.InternalPort = internalPort + elbPolicy.ExternalPort = externalPort + + for _, endpoint := range endpoints { + policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) + } + + jsonString, err := json.Marshal(elbPolicy) + if err != nil { + return nil, err + } + policylist.Policies = append(policylist.Policies, jsonString) + return policylist.Create() +} + +// AddRoute adds a route policy list for the specified endpoints +func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) { + operation := "AddRoute" + title := "hcsshim::PolicyList::" + operation + logrus.Debugf(title+" destinationPrefix:%s", destinationPrefix) + + policylist := 
&PolicyList{} + + rPolicy := &RoutePolicy{ + DestinationPrefix: destinationPrefix, + NextHop: nextHop, + EncapEnabled: encapEnabled, + } + rPolicy.Type = Route + + for _, endpoint := range endpoints { + policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) + } + + jsonString, err := json.Marshal(rPolicy) + if err != nil { + return nil, err + } + + policylist.Policies = append(policylist.Policies, jsonString) + return policylist.Create() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go new file mode 100644 index 00000000000..d5efba7f284 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go @@ -0,0 +1,49 @@ +package hns + +import ( + "github.com/sirupsen/logrus" +) + +type HNSSupportedFeatures struct { + Acl HNSAclFeatures `json:"ACL"` +} + +type HNSAclFeatures struct { + AclAddressLists bool `json:"AclAddressLists"` + AclNoHostRulePriority bool `json:"AclHostRulePriority"` + AclPortRanges bool `json:"AclPortRanges"` + AclRuleId bool `json:"AclRuleId"` +} + +func GetHNSSupportedFeatures() HNSSupportedFeatures { + var hnsFeatures HNSSupportedFeatures + + globals, err := GetHNSGlobals() + if err != nil { + // Expected on pre-1803 builds, all features will be false/unsupported + logrus.Debugf("Unable to obtain HNS globals: %s", err) + return hnsFeatures + } + + hnsFeatures.Acl = HNSAclFeatures{ + AclAddressLists: isHNSFeatureSupported(globals.Version, HNSVersion1803), + AclNoHostRulePriority: isHNSFeatureSupported(globals.Version, HNSVersion1803), + AclPortRanges: isHNSFeatureSupported(globals.Version, HNSVersion1803), + AclRuleId: isHNSFeatureSupported(globals.Version, HNSVersion1803), + } + + return hnsFeatures +} + +func isHNSFeatureSupported(currentVersion HNSVersion, minVersionSupported HNSVersion) bool { + if currentVersion.Major < minVersionSupported.Major { + return false + } + if currentVersion.Major > minVersionSupported.Major { + return true + } + if currentVersion.Minor < minVersionSupported.Minor { + return false + } + return true +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go new file mode 100644 index 00000000000..d3b04eefe0c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go @@ -0,0 +1,111 @@ +package hns + +import ( + "encoding/json" + "fmt" + "os" + "path" + "strings" +) + +type namespaceRequest struct { + IsDefault bool `json:",omitempty"` +} + +type namespaceEndpointRequest struct { + ID string `json:"Id"` +} + +type NamespaceResource struct { + Type string + Data json.RawMessage +} + +type namespaceResourceRequest struct { + Type string + Data interface{} +} + +type Namespace struct { + ID string + IsDefault bool `json:",omitempty"` + ResourceList []NamespaceResource `json:",omitempty"` + CompartmentId uint32 `json:",omitempty"` +} + +func issueNamespaceRequest(id *string, method, subpath string, request interface{}) (*Namespace, error) { + var err error + hnspath := "/namespaces/" + if id != nil { + hnspath = path.Join(hnspath, *id) + } + if subpath != "" { + hnspath = path.Join(hnspath, subpath) + } + var reqJSON []byte + if request != nil { + if reqJSON, err = json.Marshal(request); err != nil { + return nil, err + } + } + var ns Namespace + err = hnsCall(method, hnspath, string(reqJSON), &ns) + if err != nil { + if strings.Contains(err.Error(), "Element not found.") { + return nil, 
os.ErrNotExist + } + return nil, fmt.Errorf("%s %s: %s", method, hnspath, err) + } + return &ns, err +} + +func CreateNamespace() (string, error) { + req := namespaceRequest{} + ns, err := issueNamespaceRequest(nil, "POST", "", &req) + if err != nil { + return "", err + } + return ns.ID, nil +} + +func RemoveNamespace(id string) error { + _, err := issueNamespaceRequest(&id, "DELETE", "", nil) + return err +} + +func GetNamespaceEndpoints(id string) ([]string, error) { + ns, err := issueNamespaceRequest(&id, "GET", "", nil) + if err != nil { + return nil, err + } + var endpoints []string + for _, rsrc := range ns.ResourceList { + if rsrc.Type == "Endpoint" { + var endpoint namespaceEndpointRequest + err = json.Unmarshal(rsrc.Data, &endpoint) + if err != nil { + return nil, fmt.Errorf("unmarshal endpoint: %s", err) + } + endpoints = append(endpoints, endpoint.ID) + } + } + return endpoints, nil +} + +func AddNamespaceEndpoint(id string, endpointID string) error { + resource := namespaceResourceRequest{ + Type: "Endpoint", + Data: namespaceEndpointRequest{endpointID}, + } + _, err := issueNamespaceRequest(&id, "POST", "addresource", &resource) + return err +} + +func RemoveNamespaceEndpoint(id string, endpointID string) error { + resource := namespaceResourceRequest{ + Type: "Endpoint", + Data: namespaceEndpointRequest{endpointID}, + } + _, err := issueNamespaceRequest(&id, "POST", "removeresource", &resource) + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go new file mode 100644 index 00000000000..204633a4887 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go @@ -0,0 +1,76 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package hns + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + + procHNSCall = modvmcompute.NewProc("HNSCall") +) + +func _hnsCall(method string, path string, object string, response **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(method) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(object) + if hr != nil { + return + } + return __hnsCall(_p0, _p1, _p2, response) +} + +func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { + if hr = procHNSCall.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go new file mode 100644 index 00000000000..922f7c679e0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go @@ -0,0 +1,23 @@ +package interop + +import ( + "syscall" + "unsafe" +) + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go interop.go + +//sys coTaskMemFree(buffer unsafe.Pointer) = api_ms_win_core_com_l1_1_0.CoTaskMemFree + +func ConvertAndFreeCoTaskMemString(buffer *uint16) string { + str := syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(buffer))[:]) + coTaskMemFree(unsafe.Pointer(buffer)) + return str +} + +func Win32FromHresult(hr uintptr) syscall.Errno { + if hr&0x1fff0000 == 0x00070000 { + return syscall.Errno(hr & 0xffff) + } + return syscall.Errno(hr) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go new file mode 100644 index 00000000000..12b0c71c5ae --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go @@ -0,0 +1,48 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package interop + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modapi_ms_win_core_com_l1_1_0 = windows.NewLazySystemDLL("api-ms-win-core-com-l1-1-0.dll") + + procCoTaskMemFree = modapi_ms_win_core_com_l1_1_0.NewProc("CoTaskMemFree") +) + +func coTaskMemFree(buffer unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0) + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/g.go b/vendor/github.com/Microsoft/hcsshim/internal/log/g.go new file mode 100644 index 00000000000..ba6b1a4a53a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/g.go @@ -0,0 +1,23 @@ +package log + +import ( + "context" + + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" +) + +// G returns a `logrus.Entry` with the `TraceID, SpanID` from `ctx` if `ctx` +// contains an OpenCensus `trace.Span`. +func G(ctx context.Context) *logrus.Entry { + span := trace.FromContext(ctx) + if span != nil { + sctx := span.SpanContext() + return logrus.WithFields(logrus.Fields{ + "traceID": sctx.TraceID.String(), + "spanID": sctx.SpanID.String(), + // "parentSpanID": TODO: JTERRY75 - Try to convince OC to export this? + }) + } + return logrus.NewEntry(logrus.StandardLogger()) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go new file mode 100644 index 00000000000..cf2c166d9b8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go @@ -0,0 +1,32 @@ +package logfields + +const ( + // Identifiers + + ContainerID = "cid" + UVMID = "uvm-id" + ProcessID = "pid" + + // Common Misc + + // Timeout represents an operation timeout. + Timeout = "timeout" + JSON = "json" + + // Keys/values + + Field = "field" + OCIAnnotation = "oci-annotation" + Value = "value" + + // Golang types + + ExpectedType = "expected-type" + Bool = "bool" + Uint32 = "uint32" + Uint64 = "uint64" + + // runhcs + + VMShimOperation = "vmshim-op" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go b/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go new file mode 100644 index 00000000000..e5b8b85e09a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go @@ -0,0 +1,24 @@ +package longpath + +import ( + "path/filepath" + "strings" +) + +// LongAbs makes a path absolute and returns it in NT long path form. +func LongAbs(path string) (string, error) { + if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) { + return path, nil + } + if !filepath.IsAbs(path) { + absPath, err := filepath.Abs(path) + if err != nil { + return "", err + } + path = absPath + } + if strings.HasPrefix(path, `\\`) { + return `\\?\UNC\` + path[2:], nil + } + return `\\?\` + path, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go b/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go new file mode 100644 index 00000000000..7e95efb30d7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/mergemaps/merge.go @@ -0,0 +1,52 @@ +package mergemaps + +import "encoding/json" + +// Merge recursively merges map `fromMap` into map `ToMap`. Values already present +// in fromMap take precedence; values present only in ToMap are added to the result.
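+// For example, Merge(map[string]interface{}{"a": 1}, map[string]interface{}{"a": 2, "b": 3}) +// returns map[string]interface{}{"a": 1, "b": 3}: "a" keeps the fromMap value and "b" is carried over.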
+// From http://stackoverflow.com/questions/40491438/merging-two-json-strings-in-golang +func Merge(fromMap, ToMap interface{}) interface{} { + switch fromMap := fromMap.(type) { + case map[string]interface{}: + ToMap, ok := ToMap.(map[string]interface{}) + if !ok { + return fromMap + } + for keyToMap, valueToMap := range ToMap { + if valueFromMap, ok := fromMap[keyToMap]; ok { + fromMap[keyToMap] = Merge(valueFromMap, valueToMap) + } else { + fromMap[keyToMap] = valueToMap + } + } + case nil: + // merge(nil, map[string]interface{...}) -> map[string]interface{...} + ToMap, ok := ToMap.(map[string]interface{}) + if ok { + return ToMap + } + } + return fromMap +} + +// MergeJSON merges the contents of a JSON string into an object representation, +// returning a new object suitable for translating to JSON. +func MergeJSON(object interface{}, additionalJSON []byte) (interface{}, error) { + if len(additionalJSON) == 0 { + return object, nil + } + objectJSON, err := json.Marshal(object) + if err != nil { + return nil, err + } + var objectMap, newMap map[string]interface{} + err = json.Unmarshal(objectJSON, &objectMap) + if err != nil { + return nil, err + } + err = json.Unmarshal(additionalJSON, &newMap) + if err != nil { + return nil, err + } + return Merge(newMap, objectMap), nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go new file mode 100644 index 00000000000..f428bdaf720 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/exporter.go @@ -0,0 +1,43 @@ +package oc + +import ( + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" +) + +var _ = (trace.Exporter)(&LogrusExporter{}) + +// LogrusExporter is an OpenCensus `trace.Exporter` that exports +// `trace.SpanData` to logrus output. +type LogrusExporter struct { +} + +// ExportSpan exports `s` based on the following rules: +// +// 1. All output will contain `s.Attributes`, `s.TraceID`, `s.SpanID`, +// `s.ParentSpanID` for correlation +// +// 2. Any calls to .Annotate will not be supported. +// +// 3. The span itself will be written at `logrus.InfoLevel` unless +// `s.Status.Code != 0` in which case it will be written at `logrus.ErrorLevel` +// providing `s.Status.Message` as the error value. +func (le *LogrusExporter) ExportSpan(s *trace.SpanData) { + // Combine all span annotations with traceID, spanID, parentSpanID + baseEntry := logrus.WithFields(logrus.Fields(s.Attributes)) + baseEntry.Data["traceID"] = s.TraceID.String() + baseEntry.Data["spanID"] = s.SpanID.String() + baseEntry.Data["parentSpanID"] = s.ParentSpanID.String() + baseEntry.Data["startTime"] = s.StartTime + baseEntry.Data["endTime"] = s.EndTime + baseEntry.Data["duration"] = s.EndTime.Sub(s.StartTime).String() + baseEntry.Data["name"] = s.Name + baseEntry.Time = s.StartTime + + level := logrus.InfoLevel + if s.Status.Code != 0 { + level = logrus.ErrorLevel + baseEntry.Data[logrus.ErrorKey] = s.Status.Message + } + baseEntry.Log(level, "Span") +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go new file mode 100644 index 00000000000..fee4765cbc4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go @@ -0,0 +1,17 @@ +package oc + +import ( + "go.opencensus.io/trace" +) + +// SetSpanStatus calls `span.SetStatus` with the proper status depending on `err`. If +// `err` is `nil` it assumes `trace.StatusCodeOk`.
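+// It is typically deferred alongside span.End, as the vmcompute wrappers in this +// repository do: defer func() { oc.SetSpanStatus(span, hr) }()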
+func SetSpanStatus(span *trace.Span, err error) { + status := trace.Status{} + if err != nil { + // TODO: JTERRY75 - Handle errors in a non-generic way + status.Code = trace.StatusCodeUnknown + status.Message = err.Error() + } + span.SetStatus(status) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go new file mode 100644 index 00000000000..66b8d7e0353 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go @@ -0,0 +1,375 @@ +package safefile + +import ( + "errors" + "io" + "os" + "path/filepath" + "strings" + "syscall" + "unicode/utf16" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/longpath" + "github.com/Microsoft/hcsshim/internal/winapi" + + winio "github.com/Microsoft/go-winio" +) + +func OpenRoot(path string) (*os.File, error) { + longpath, err := longpath.LongAbs(path) + if err != nil { + return nil, err + } + return winio.OpenForBackup(longpath, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, syscall.OPEN_EXISTING) +} + +func cleanGoStringRelativePath(path string) (string, error) { + path = filepath.Clean(path) + if strings.Contains(path, ":") { + // Since alternate data streams must follow the file they + // are attached to, finding one here (out of order) is invalid. + return "", errors.New("path contains invalid character `:`") + } + fspath := filepath.FromSlash(path) + if len(fspath) > 0 && fspath[0] == '\\' { + return "", errors.New("expected relative path") + } + return fspath, nil +} + +func ntRelativePath(path string) ([]uint16, error) { + fspath, err := cleanGoStringRelativePath(path) + if err != nil { + return nil, err + } + + path16 := utf16.Encode(([]rune)(fspath)) + if len(path16) > 32767 { + return nil, syscall.ENAMETOOLONG + } + + return path16, nil +} + +// openRelativeInternal opens a relative path from the given root, failing if +// any of the intermediate path components are reparse points. +func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { + var ( + h uintptr + iosb winapi.IOStatusBlock + oa winapi.ObjectAttributes + ) + + cleanRelativePath, err := cleanGoStringRelativePath(path) + if err != nil { + return nil, err + } + + if root == nil || root.Fd() == 0 { + return nil, errors.New("missing root directory") + } + + pathUnicode, err := winapi.NewUnicodeString(cleanRelativePath) + if err != nil { + return nil, err + } + + oa.Length = unsafe.Sizeof(oa) + oa.ObjectName = pathUnicode + oa.RootDirectory = uintptr(root.Fd()) + oa.Attributes = winapi.OBJ_DONT_REPARSE + status := winapi.NtCreateFile( + &h, + accessMask|syscall.SYNCHRONIZE, + &oa, + &iosb, + nil, + 0, + shareFlags, + createDisposition, + winapi.FILE_OPEN_FOR_BACKUP_INTENT|winapi.FILE_SYNCHRONOUS_IO_NONALERT|flags, + nil, + 0, + ) + if status != 0 { + return nil, winapi.RtlNtStatusToDosError(status) + } + + fullPath, err := longpath.LongAbs(filepath.Join(root.Name(), path)) + if err != nil { + syscall.Close(syscall.Handle(h)) + return nil, err + } + + return os.NewFile(h, fullPath), nil +} + +// OpenRelative opens a relative path from the given root, failing if +// any of the intermediate path components are reparse points. 
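+// On failure the error is returned as an *os.PathError whose Path joins the root's name with the relative path.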
+func OpenRelative(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { + f, err := openRelativeInternal(path, root, accessMask, shareFlags, createDisposition, flags) + if err != nil { + err = &os.PathError{Op: "open", Path: filepath.Join(root.Name(), path), Err: err} + } + return f, err +} + +// LinkRelative creates a hard link from oldname to newname (relative to oldroot +// and newroot), failing if any of the intermediate path components are reparse +// points. +func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.File) error { + // Open the old file. + oldf, err := openRelativeInternal( + oldname, + oldroot, + syscall.FILE_WRITE_ATTRIBUTES, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + 0, + ) + if err != nil { + return &os.LinkError{Op: "link", Old: filepath.Join(oldroot.Name(), oldname), New: filepath.Join(newroot.Name(), newname), Err: err} + } + defer oldf.Close() + + // Open the parent of the new file. + var parent *os.File + parentPath := filepath.Dir(newname) + if parentPath != "." { + parent, err = openRelativeInternal( + parentPath, + newroot, + syscall.GENERIC_READ, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + winapi.FILE_DIRECTORY_FILE) + if err != nil { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err} + } + defer parent.Close() + + fi, err := winio.GetFileBasicInfo(parent) + if err != nil { + return err + } + if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: winapi.RtlNtStatusToDosError(winapi.STATUS_REPARSE_POINT_ENCOUNTERED)} + } + + } else { + parent = newroot + } + + // Issue an NT call to create the link. This will be safe because NT will + // not open any more directories to create the link, so it cannot walk any + // more reparse points. + newbase := filepath.Base(newname) + newbase16, err := ntRelativePath(newbase) + if err != nil { + return err + } + + size := int(unsafe.Offsetof(winapi.FileLinkInformation{}.FileName)) + len(newbase16)*2 + linkinfoBuffer := winapi.LocalAlloc(0, size) + defer winapi.LocalFree(linkinfoBuffer) + + linkinfo := (*winapi.FileLinkInformation)(unsafe.Pointer(linkinfoBuffer)) + linkinfo.RootDirectory = parent.Fd() + linkinfo.FileNameLength = uint32(len(newbase16) * 2) + copy(winapi.Uint16BufferToSlice(&linkinfo.FileName[0], len(newbase16)), newbase16) + + var iosb winapi.IOStatusBlock + status := winapi.NtSetInformationFile( + oldf.Fd(), + &iosb, + linkinfoBuffer, + uint32(size), + winapi.FileLinkInformationClass, + ) + if status != 0 { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: winapi.RtlNtStatusToDosError(status)} + } + + return nil +} + +// deleteOnClose marks a file to be deleted when the handle is closed. +func deleteOnClose(f *os.File) error { + disposition := winapi.FileDispositionInformationEx{Flags: winapi.FILE_DISPOSITION_DELETE} + var iosb winapi.IOStatusBlock + status := winapi.NtSetInformationFile( + f.Fd(), + &iosb, + uintptr(unsafe.Pointer(&disposition)), + uint32(unsafe.Sizeof(disposition)), + winapi.FileDispositionInformationExClass, + ) + if status != 0 { + return winapi.RtlNtStatusToDosError(status) + } + return nil +} + +// clearReadOnly clears the readonly attribute on a file. 
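+// RemoveRelative uses this to retry a delete-on-close that failed with ERROR_ACCESS_DENIED because the file was marked read-only.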
+func clearReadOnly(f *os.File) error { + bi, err := winio.GetFileBasicInfo(f) + if err != nil { + return err + } + if bi.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY == 0 { + return nil + } + sbi := winio.FileBasicInfo{ + FileAttributes: bi.FileAttributes &^ syscall.FILE_ATTRIBUTE_READONLY, + } + if sbi.FileAttributes == 0 { + sbi.FileAttributes = syscall.FILE_ATTRIBUTE_NORMAL + } + return winio.SetFileBasicInfo(f, &sbi) +} + +// RemoveRelative removes a file or directory relative to a root, failing if any +// intermediate path components are reparse points. +func RemoveRelative(path string, root *os.File) error { + f, err := openRelativeInternal( + path, + root, + winapi.FILE_READ_ATTRIBUTES|winapi.FILE_WRITE_ATTRIBUTES|winapi.DELETE, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) + if err == nil { + defer f.Close() + err = deleteOnClose(f) + if err == syscall.ERROR_ACCESS_DENIED { + // Maybe the file is marked readonly. Clear the bit and retry. + _ = clearReadOnly(f) + err = deleteOnClose(f) + } + } + if err != nil { + return &os.PathError{Op: "remove", Path: filepath.Join(root.Name(), path), Err: err} + } + return nil +} + +// RemoveAllRelative removes a directory tree relative to a root, failing if any +// intermediate path components are reparse points. +func RemoveAllRelative(path string, root *os.File) error { + fi, err := LstatRelative(path, root) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + fileAttributes := fi.Sys().(*syscall.Win32FileAttributeData).FileAttributes + if fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY == 0 || fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 { + // If this is a reparse point, it can't have children. Simple remove will do. + err := RemoveRelative(path, root) + if err == nil || os.IsNotExist(err) { + return nil + } + return err + } + + // It is necessary to use os.Open as Readdirnames does not work with + // OpenRelative. This is safe because the above lstatrelative fails + // if the target is outside the root, and we know this is not a + // symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check. + fd, err := os.Open(filepath.Join(root.Name(), path)) + if err != nil { + if os.IsNotExist(err) { + // Race. It was deleted between the Lstat and Open. + // Return nil per RemoveAll's docs. + return nil + } + return err + } + + // Remove contents & return first error. + for { + names, err1 := fd.Readdirnames(100) + for _, name := range names { + err1 := RemoveAllRelative(path+string(os.PathSeparator)+name, root) + if err == nil { + err = err1 + } + } + if err1 == io.EOF { + break + } + // If Readdirnames returned an error, use it. + if err == nil { + err = err1 + } + if len(names) == 0 { + break + } + } + fd.Close() + + // Remove directory. + err1 := RemoveRelative(path, root) + if err1 == nil || os.IsNotExist(err1) { + return nil + } + if err == nil { + err = err1 + } + return err +} + +// MkdirRelative creates a directory relative to a root, failing if any +// intermediate path components are reparse points. 
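+// Note that only the final path component is created; unlike os.MkdirAll, intermediate directories must already exist.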
+func MkdirRelative(path string, root *os.File) error { + f, err := openRelativeInternal( + path, + root, + 0, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_CREATE, + winapi.FILE_DIRECTORY_FILE) + if err == nil { + f.Close() + } else { + err = &os.PathError{Op: "mkdir", Path: filepath.Join(root.Name(), path), Err: err} + } + return err +} + +// LstatRelative performs a stat operation on a file relative to a root, failing +// if any intermediate path components are reparse points. +func LstatRelative(path string, root *os.File) (os.FileInfo, error) { + f, err := openRelativeInternal( + path, + root, + winapi.FILE_READ_ATTRIBUTES, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) + if err != nil { + return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err} + } + defer f.Close() + return f.Stat() +} + +// EnsureNotReparsePointRelative validates that a given file (relative to a +// root) and all intermediate path components are not a reparse points. +func EnsureNotReparsePointRelative(path string, root *os.File) error { + // Perform an open with OBJ_DONT_REPARSE but without specifying FILE_OPEN_REPARSE_POINT. + f, err := OpenRelative( + path, + root, + 0, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + winapi.FILE_OPEN, + 0) + if err != nil { + return err + } + f.Close() + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go b/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go new file mode 100644 index 00000000000..eaf39fa5132 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/timeout/timeout.go @@ -0,0 +1,74 @@ +package timeout + +import ( + "os" + "strconv" + "time" +) + +var ( + // defaultTimeout is the timeout for most operations that is not overridden. + defaultTimeout = 4 * time.Minute + + // defaultTimeoutTestdRetry is the retry loop timeout for testd to respond + // for a disk to come online in LCOW. + defaultTimeoutTestdRetry = 5 * time.Second +) + +// External variables for HCSShim consumers to use. +var ( + // SystemCreate is the timeout for creating a compute system + SystemCreate time.Duration = defaultTimeout + + // SystemStart is the timeout for starting a compute system + SystemStart time.Duration = defaultTimeout + + // SystemPause is the timeout for pausing a compute system + SystemPause time.Duration = defaultTimeout + + // SystemResume is the timeout for resuming a compute system + SystemResume time.Duration = defaultTimeout + + // SystemSave is the timeout for saving a compute system + SystemSave time.Duration = defaultTimeout + + // SyscallWatcher is the timeout before warning of a potential stuck platform syscall. + SyscallWatcher time.Duration = defaultTimeout + + // Tar2VHD is the timeout for the tar2vhd operation to complete + Tar2VHD time.Duration = defaultTimeout + + // ExternalCommandToStart is the timeout for external commands to start + ExternalCommandToStart = defaultTimeout + + // ExternalCommandToComplete is the timeout for external commands to complete. + // Generally this means copying data from their stdio pipes. 
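+	// Like the other timeouts in this block, it can be overridden at startup via the +	// HCSSHIM_TIMEOUT_EXTERNALCOMMANDCOMPLETE environment variable read in init below.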
+ ExternalCommandToComplete = defaultTimeout + + // TestDRetryLoop is the timeout for testd retry loop when onlining a SCSI disk in LCOW + TestDRetryLoop = defaultTimeoutTestdRetry +) + +func init() { + SystemCreate = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMCREATE", SystemCreate) + SystemStart = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSTART", SystemStart) + SystemPause = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMPAUSE", SystemPause) + SystemResume = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMRESUME", SystemResume) + SystemSave = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSAVE", SystemSave) + SyscallWatcher = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSCALLWATCHER", SyscallWatcher) + Tar2VHD = durationFromEnvironment("HCSSHIM_TIMEOUT_TAR2VHD", Tar2VHD) + ExternalCommandToStart = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDSTART", ExternalCommandToStart) + ExternalCommandToComplete = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDCOMPLETE", ExternalCommandToComplete) + TestDRetryLoop = durationFromEnvironment("HCSSHIM_TIMEOUT_TESTDRETRYLOOP", TestDRetryLoop) +} + +func durationFromEnvironment(env string, defaultValue time.Duration) time.Duration { + envTimeout := os.Getenv(env) + if len(envTimeout) > 0 { + e, err := strconv.Atoi(envTimeout) + if err == nil && e > 0 { + return time.Second * time.Duration(e) + } + } + return defaultValue +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go new file mode 100644 index 00000000000..e7f114b67aa --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go @@ -0,0 +1,610 @@ +package vmcompute + +import ( + gcontext "context" + "syscall" + "time" + + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/timeout" + "go.opencensus.io/trace" +) + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go vmcompute.go + +//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems? +//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? +//sys hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem? +//sys hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem? +//sys hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem? +//sys hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem? +//sys hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem? +//sys hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem? +//sys hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? 
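+// (The trailing '?' on these //sys declarations marks each export as optional: the generated +// stub calls the proc's Find() first and returns the lookup error if vmcompute.dll does not +// provide the entry point, rather than panicking at call time.)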
+//sys hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties? +//sys hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem? +//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings? +//sys hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback? +//sys hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback? +//sys hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsSaveComputeSystem? + +//sys hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess? +//sys hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? +//sys hcsCloseProcess(process HcsProcess) (hr error) = vmcompute.HcsCloseProcess? +//sys hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? +//sys hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsSignalProcess? +//sys hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? +//sys hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? +//sys hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? +//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties? +//sys hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback? +//sys hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback? + +// errVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously +const errVmcomputeOperationPending = syscall.Errno(0xC0370103) + +// HcsSystem is the handle associated with a created compute system. +type HcsSystem syscall.Handle + +// HcsProcess is the handle associated with a created process in a compute +// system. +type HcsProcess syscall.Handle + +// HcsCallback is the handle associated with the function to call when events +// occur. +type HcsCallback syscall.Handle + +// HcsProcessInformation is the structure used when creating or getting process +// info. +type HcsProcessInformation struct { + // ProcessId is the pid of the created process. + ProcessId uint32 + reserved uint32 //nolint:structcheck + // StdInput is the handle associated with the stdin of the process. + StdInput syscall.Handle + // StdOutput is the handle associated with the stdout of the process. + StdOutput syscall.Handle + // StdError is the handle associated with the stderr of the process. 
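+	// Note: the caller is expected to take ownership of these stdio handles and close +	// them when done (an assumption based on typical HCS v1 usage, not stated here).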
+ StdError syscall.Handle +} + +func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error { + if timeout > 0 { + var cancel gcontext.CancelFunc + ctx, cancel = gcontext.WithTimeout(ctx, timeout) + defer cancel() + } + + done := make(chan error, 1) + go func() { + done <- f() + }() + select { + case <-ctx.Done(): + if ctx.Err() == gcontext.DeadlineExceeded { + log.G(ctx).WithField(logfields.Timeout, timeout). + Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.") + } + return ctx.Err() + case err := <-done: + return err + } +} + +func HcsEnumerateComputeSystems(ctx gcontext.Context, query string) (computeSystems, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsEnumerateComputeSystems") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("query", query)) + + return computeSystems, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + computeSystemsp *uint16 + resultp *uint16 + ) + err := hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp) + if computeSystemsp != nil { + computeSystems = interop.ConvertAndFreeCoTaskMemString(computeSystemsp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration string, identity syscall.Handle) (computeSystem HcsSystem, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCreateComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes( + trace.StringAttribute("id", id), + trace.StringAttribute("configuration", configuration)) + + return computeSystem, result, execute(ctx, timeout.SystemCreate, func() error { + var resultp *uint16 + err := hcsCreateComputeSystem(id, configuration, identity, &computeSystem, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsOpenComputeSystem(ctx gcontext.Context, id string) (computeSystem HcsSystem, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsOpenComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return computeSystem, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsOpenComputeSystem(id, &computeSystem, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCloseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCloseComputeSystem") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsCloseComputeSystem(computeSystem) + }) +} + +func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsStartComputeSystem") + defer 
span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemStart, func() error { + var resultp *uint16 + err := hcsStartComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsShutdownComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsShutdownComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsTerminateComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsTerminateComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsPauseComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemPause, func() error { + var resultp *uint16 + err := hcsPauseComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsResumeComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SystemResume, func() error { + var resultp *uint16 + err := hcsResumeComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetComputeSystemProperties(ctx gcontext.Context, computeSystem HcsSystem, propertyQuery string) (properties, result string, hr error) { + ctx, span := 
trace.StartSpan(ctx, "HcsGetComputeSystemProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) + + return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + propertiesp *uint16 + resultp *uint16 + ) + err := hcsGetComputeSystemProperties(computeSystem, propertyQuery, &propertiesp, &resultp) + if propertiesp != nil { + properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, configuration string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsModifyComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("configuration", configuration)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyComputeSystem(computeSystem, configuration, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsModifyServiceSettings") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("settings", settings)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyServiceSettings(settings, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSystem, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsRegisterComputeSystemCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { + return hcsRegisterComputeSystemCallback(computeSystem, callback, context, &callbackHandle) + }) +} + +func HcsUnregisterComputeSystemCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsUnregisterComputeSystemCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsUnregisterComputeSystemCallback(callbackHandle) + }) +} + +func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processParameters string) (processInformation HcsProcessInformation, process HcsProcess, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCreateProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("processParameters", processParameters)) + + return processInformation, process, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := 
hcsCreateProcess(computeSystem, processParameters, &processInformation, &process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsOpenProcess(ctx gcontext.Context, computeSystem HcsSystem, pid uint32) (process HcsProcess, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsOpenProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.Int64Attribute("pid", int64(pid))) + + return process, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsOpenProcess(computeSystem, pid, &process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsCloseProcess(ctx gcontext.Context, process HcsProcess) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsCloseProcess") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsCloseProcess(process) + }) +} + +func HcsTerminateProcess(ctx gcontext.Context, process HcsProcess) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsTerminateProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsTerminateProcess(process, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsSignalProcess(ctx gcontext.Context, process HcsProcess, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsSignalProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("options", options)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsSignalProcess(process, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetProcessInfo(ctx gcontext.Context, process HcsProcess) (processInformation HcsProcessInformation, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsGetProcessInfo") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return processInformation, result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsGetProcessInfo(process, &processInformation, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetProcessProperties(ctx gcontext.Context, process HcsProcess) (processProperties, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsGetProcessProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + + return processProperties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + processPropertiesp *uint16 + resultp *uint16 + ) + err := hcsGetProcessProperties(process, 
&processPropertiesp, &resultp) + if processPropertiesp != nil { + processProperties = interop.ConvertAndFreeCoTaskMemString(processPropertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsModifyProcess(ctx gcontext.Context, process HcsProcess, settings string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsModifyProcess") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("settings", settings)) + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsModifyProcess(process, settings, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsGetServiceProperties(ctx gcontext.Context, propertyQuery string) (properties, result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsGetServiceProperties") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + oc.SetSpanStatus(span, hr) + }() + span.AddAttributes(trace.StringAttribute("propertyQuery", propertyQuery)) + + return properties, result, execute(ctx, timeout.SyscallWatcher, func() error { + var ( + propertiesp *uint16 + resultp *uint16 + ) + err := hcsGetServiceProperties(propertyQuery, &propertiesp, &resultp) + if propertiesp != nil { + properties = interop.ConvertAndFreeCoTaskMemString(propertiesp) + } + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} + +func HcsRegisterProcessCallback(ctx gcontext.Context, process HcsProcess, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsRegisterProcessCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return callbackHandle, execute(ctx, timeout.SyscallWatcher, func() error { + return hcsRegisterProcessCallback(process, callback, context, &callbackHandle) + }) +} + +func HcsUnregisterProcessCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { + ctx, span := trace.StartSpan(ctx, "HcsUnregisterProcessCallback") + defer span.End() + defer func() { oc.SetSpanStatus(span, hr) }() + + return execute(ctx, timeout.SyscallWatcher, func() error { + return hcsUnregisterProcessCallback(callbackHandle) + }) +} + +func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { + ctx, span := trace.StartSpan(ctx, "HcsSaveComputeSystem") + defer span.End() + defer func() { + if result != "" { + span.AddAttributes(trace.StringAttribute("result", result)) + } + if hr != errVmcomputeOperationPending { + oc.SetSpanStatus(span, hr) + } + }() + + return result, execute(ctx, timeout.SyscallWatcher, func() error { + var resultp *uint16 + err := hcsSaveComputeSystem(computeSystem, options, &resultp) + if resultp != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultp) + } + return err + }) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go new file mode 100644 index 00000000000..cae55058deb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go @@ -0,0 +1,581 @@ +// Code generated mksyscall_windows.exe DO NOT 
EDIT + +package vmcompute + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + + procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems") + procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem") + procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem") + procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem") + procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem") + procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem") + procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem") + procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem") + procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem") + procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties") + procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem") + procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings") + procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback") + procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback") + procHcsSaveComputeSystem = modvmcompute.NewProc("HcsSaveComputeSystem") + procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess") + procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") + procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") + procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") + procHcsSignalProcess = modvmcompute.NewProc("HcsSignalProcess") + procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") + procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") + procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") + procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") + procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") + procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") +) + +func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcsEnumerateComputeSystems(_p0, computeSystems, result) +} + +func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) { + if hr = procHcsEnumerateComputeSystems.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func 
hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(configuration) + if hr != nil { + return + } + return _hcsCreateComputeSystem(_p0, _p1, identity, computeSystem, result) +} + +func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { + if hr = procHcsCreateComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _hcsOpenComputeSystem(_p0, computeSystem, result) +} + +func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16) (hr error) { + if hr = procHcsOpenComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) { + if hr = procHcsCloseComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsStartComputeSystem(computeSystem, _p0, result) +} + +func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsStartComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsShutdownComputeSystem(computeSystem, _p0, result) +} + +func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsShutdownComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) 
{ + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsTerminateComputeSystem(computeSystem, _p0, result) +} + +func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsTerminateComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsPauseComputeSystem(computeSystem, _p0, result) +} + +func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsPauseComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsResumeComputeSystem(computeSystem, _p0, result) +} + +func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsResumeComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) + if hr != nil { + return + } + return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result) +} + +func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcsGetComputeSystemProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(configuration) + if hr != nil { + return + } + return _hcsModifyComputeSystem(computeSystem, _p0, result) +} + +func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, result **uint16) (hr error) { + if hr = procHcsModifyComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if 
r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsModifyServiceSettings(settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcsModifyServiceSettings(_p0, result) +} + +func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { + if hr = procHcsModifyServiceSettings.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { + if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) { + if hr = procHcsUnregisterComputeSystemCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSaveComputeSystem(computeSystem, _p0, result) +} + +func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + if hr = procHcsSaveComputeSystem.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(processParameters) + if hr != nil { + return + } + return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result) +} + +func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { + if hr = procHcsCreateProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) { + if hr = procHcsOpenProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, 
uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsCloseProcess(process HcsProcess) (hr error) { + if hr = procHcsCloseProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) { + if hr = procHcsTerminateProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { + return + } + return _hcsSignalProcess(process, _p0, result) +} + +func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr error) { + if hr = procHcsSignalProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) { + if hr = procHcsGetProcessInfo.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) { + if hr = procHcsGetProcessProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcsModifyProcess(process, _p0, result) +} + +func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (hr error) { + if hr = procHcsModifyProcess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) + if hr != nil { + return + } + return _hcsGetServiceProperties(_p0, properties, result) +} + +func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { + if hr 
= procHcsGetServiceProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { + if hr = procHcsRegisterProcessCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) { + if hr = procHcsUnregisterProcessCallback.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go new file mode 100644 index 00000000000..5debe974d41 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go @@ -0,0 +1,27 @@ +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// ActivateLayer will find the layer with the given id and mount its filesystem. +// For a read/write layer, the mounted filesystem will appear as a volume on the +// host, while a read-only layer is generally expected to be a no-op. +// An activated layer must later be deactivated via DeactivateLayer. +func ActivateLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::ActivateLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = activateLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go new file mode 100644 index 00000000000..3ec708d1ed3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go @@ -0,0 +1,182 @@ +package wclayer + +import ( + "context" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/safefile" + "github.com/Microsoft/hcsshim/internal/winapi" + "go.opencensus.io/trace" +) + +type baseLayerWriter struct { + ctx context.Context + s *trace.Span + + root *os.File + f *os.File + bw *winio.BackupFileWriter + err error + hasUtilityVM bool + dirInfo []dirInfo +} + +type dirInfo struct { + path string + fileInfo winio.FileBasicInfo +} + +// reapplyDirectoryTimes reapplies directory modification, creation, etc. times +// after processing of the directory tree has completed.
The times are expected +// to be ordered such that parent directories come before child directories. +func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { + for i := range dis { + di := &dis[len(dis)-i-1] // reverse order: process child directories first + f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_OPEN, winapi.FILE_DIRECTORY_FILE|syscall.FILE_FLAG_OPEN_REPARSE_POINT) + if err != nil { + return err + } + + err = winio.SetFileBasicInfo(f, &di.fileInfo) + f.Close() + if err != nil { + return err + } + + } + return nil +} + +func (w *baseLayerWriter) closeCurrentFile() error { + if w.f != nil { + err := w.bw.Close() + err2 := w.f.Close() + w.f = nil + w.bw = nil + if err != nil { + return err + } + if err2 != nil { + return err2 + } + } + return nil +} + +func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) { + defer func() { + if err != nil { + w.err = err + } + }() + + err = w.closeCurrentFile() + if err != nil { + return err + } + + if filepath.ToSlash(name) == `UtilityVM/Files` { + w.hasUtilityVM = true + } + + var f *os.File + defer func() { + if f != nil { + f.Close() + } + }() + + extraFlags := uint32(0) + if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + extraFlags |= winapi.FILE_DIRECTORY_FILE + w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo}) + } + + mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY) + f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, extraFlags) + if err != nil { + return hcserror.New(err, "Failed to safefile.OpenRelative", name) + } + + err = winio.SetFileBasicInfo(f, fileInfo) + if err != nil { + return hcserror.New(err, "Failed to SetFileBasicInfo", name) + } + + w.f = f + w.bw = winio.NewBackupFileWriter(f, true) + f = nil + return nil +} + +func (w *baseLayerWriter) AddLink(name string, target string) (err error) { + defer func() { + if err != nil { + w.err = err + } + }() + + err = w.closeCurrentFile() + if err != nil { + return err + } + + return safefile.LinkRelative(target, w.root, name, w.root) +} + +func (w *baseLayerWriter) Remove(name string) error { + return errors.New("base layer cannot have tombstones") +} + +func (w *baseLayerWriter) Write(b []byte) (int, error) { + n, err := w.bw.Write(b) + if err != nil { + w.err = err + } + return n, err +} + +func (w *baseLayerWriter) Close() (err error) { + defer w.s.End() + defer func() { oc.SetSpanStatus(w.s, err) }() + defer func() { + w.root.Close() + w.root = nil + }() + + err = w.closeCurrentFile() + if err != nil { + return err + } + if w.err == nil { + // Restore the file times of all the directories, since they may have + // been modified by creating child directories. 
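+ // NOTE (review annotation, not upstream code): reapplyDirectoryTimes walks
+ // dirInfo in reverse, so child directories are restored before their
+ // parents and each parent's timestamp is applied last.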
+ err = reapplyDirectoryTimes(w.root, w.dirInfo) + if err != nil { + return err + } + + err = ProcessBaseLayer(w.ctx, w.root.Name()) + if err != nil { + return err + } + + if w.hasUtilityVM { + err := safefile.EnsureNotReparsePointRelative("UtilityVM", w.root) + if err != nil { + return err + } + err = ProcessUtilityVMImage(w.ctx, filepath.Join(w.root.Name(), "UtilityVM")) + if err != nil { + return err + } + } + } + return w.err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go new file mode 100644 index 00000000000..480aee8725c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go @@ -0,0 +1,27 @@ +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// CreateLayer creates a new, empty, read-only layer on the filesystem based on +// the parent layer provided. +func CreateLayer(ctx context.Context, path, parent string) (err error) { + title := "hcsshim::CreateLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parent", parent)) + + err = createLayer(&stdDriverInfo, path, parent) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go new file mode 100644 index 00000000000..131aa94f14b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go @@ -0,0 +1,34 @@ +package wclayer + +import ( + "context" + "strings" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// CreateScratchLayer creates and populates a new read-write layer for use by a container. +// This requires the full list of paths to all parent layers up to the base. +func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { + title := "hcsshim::CreateScratchLayer" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) + if err != nil { + return err + } + + err = createSandboxLayer(&stdDriverInfo, path, 0, layers) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go new file mode 100644 index 00000000000..d5bf2f5bdc5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go @@ -0,0 +1,24 @@ +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
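+// A typical pairing, sketched here for review (illustrative, not upstream code):
+//
+//	if err := ActivateLayer(ctx, path); err == nil {
+//		defer DeactivateLayer(ctx, path)
+//	}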
+func DeactivateLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::DeactivateLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = deactivateLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title+"- failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go new file mode 100644 index 00000000000..424467ac331 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go @@ -0,0 +1,25 @@ +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// DestroyLayer will remove the on-disk files representing the layer with the given +// path, including that layer's containing folder, if any. +func DestroyLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::DestroyLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = destroyLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go new file mode 100644 index 00000000000..035c9041e68 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go @@ -0,0 +1,140 @@ +package wclayer + +import ( + "context" + "os" + "path/filepath" + "syscall" + "unsafe" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/osversion" + "go.opencensus.io/trace" +) + +// ExpandScratchSize expands the size of a layer to at least size bytes. +func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error) { + title := "hcsshim::ExpandScratchSize" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.Int64Attribute("size", int64(size))) + + err = expandSandboxSize(&stdDriverInfo, path, size) + if err != nil { + return hcserror.New(err, title, "") + } + + // Manually expand the volume now in order to work around bugs in 19H1 and + // prerelease versions of Vb. Remove once this is fixed in Windows. 
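+ // NOTE (review annotation, not upstream code): osversion.V19H1 is build
+ // 18362, so the workaround below is scoped to builds in [18362, 19020),
+ // i.e. 19H1 and early prerelease Vibranium ("Vb") builds.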
+ if build := osversion.Build(); build >= osversion.V19H1 && build < 19020 { + err = expandSandboxVolume(ctx, path) + if err != nil { + return err + } + } + return nil +} + +type virtualStorageType struct { + DeviceID uint32 + VendorID [16]byte +} + +type openVersion2 struct { + GetInfoOnly int32 // bool but 4-byte aligned + ReadOnly int32 // bool but 4-byte aligned + ResiliencyGUID [16]byte // GUID +} + +type openVirtualDiskParameters struct { + Version uint32 // Must always be set to 2 + Version2 openVersion2 +} + +func attachVhd(path string) (syscall.Handle, error) { + var ( + defaultType virtualStorageType + handle syscall.Handle + ) + parameters := openVirtualDiskParameters{Version: 2} + err := openVirtualDisk( + &defaultType, + path, + 0, + 0, + ¶meters, + &handle) + if err != nil { + return 0, &os.PathError{Op: "OpenVirtualDisk", Path: path, Err: err} + } + err = attachVirtualDisk(handle, 0, 0, 0, 0, 0) + if err != nil { + syscall.Close(handle) + return 0, &os.PathError{Op: "AttachVirtualDisk", Path: path, Err: err} + } + return handle, nil +} + +func expandSandboxVolume(ctx context.Context, path string) error { + // Mount the sandbox VHD temporarily. + vhdPath := filepath.Join(path, "sandbox.vhdx") + vhd, err := attachVhd(vhdPath) + if err != nil { + return &os.PathError{Op: "OpenVirtualDisk", Path: vhdPath, Err: err} + } + defer syscall.Close(vhd) + + // Open the volume. + volumePath, err := GetLayerMountPath(ctx, path) + if err != nil { + return err + } + if volumePath[len(volumePath)-1] == '\\' { + volumePath = volumePath[:len(volumePath)-1] + } + volume, err := os.OpenFile(volumePath, os.O_RDWR, 0) + if err != nil { + return err + } + defer volume.Close() + + // Get the volume's underlying partition size in NTFS clusters. + var ( + partitionSize int64 + bytes uint32 + ) + const _IOCTL_DISK_GET_LENGTH_INFO = 0x0007405C + err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _IOCTL_DISK_GET_LENGTH_INFO, nil, 0, (*byte)(unsafe.Pointer(&partitionSize)), 8, &bytes, nil) + if err != nil { + return &os.PathError{Op: "IOCTL_DISK_GET_LENGTH_INFO", Path: volume.Name(), Err: err} + } + const ( + clusterSize = 4096 + sectorSize = 512 + ) + targetClusters := partitionSize / clusterSize + + // Get the volume's current size in NTFS clusters. + var volumeSize int64 + err = getDiskFreeSpaceEx(volume.Name()+"\\", nil, &volumeSize, nil) + if err != nil { + return &os.PathError{Op: "GetDiskFreeSpaceEx", Path: volume.Name(), Err: err} + } + volumeClusters := volumeSize / clusterSize + + // Only resize the volume if there is space to grow, otherwise this will + // fail with invalid parameter. NTFS reserves one cluster. 
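+ // NOTE (review annotation, not upstream code): clusterSize/sectorSize is
+ // 4096/512 = 8, so, for example, a 1 GiB partition gives targetClusters =
+ // 262144 and targetSectors = 2097152 for the FSCTL_EXTEND_VOLUME call below.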
+ if volumeClusters+1 < targetClusters { + targetSectors := targetClusters * (clusterSize / sectorSize) + const _FSCTL_EXTEND_VOLUME = 0x000900F0 + err = syscall.DeviceIoControl(syscall.Handle(volume.Fd()), _FSCTL_EXTEND_VOLUME, (*byte)(unsafe.Pointer(&targetSectors)), 8, nil, 0, &bytes, nil) + if err != nil { + return &os.PathError{Op: "FSCTL_EXTEND_VOLUME", Path: volume.Name(), Err: err} + } + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go new file mode 100644 index 00000000000..97b27eb7d6b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go @@ -0,0 +1,94 @@ +package wclayer + +import ( + "context" + "io/ioutil" + "os" + "strings" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// ExportLayer will create a folder at exportFolderPath and fill that folder with +// the transport format version of the layer identified by layerId. This transport +// format includes any metadata required for later importing the layer (using +// ImportLayer), and requires the full list of parent layer paths in order to +// perform the export. +func ExportLayer(ctx context.Context, path string, exportFolderPath string, parentLayerPaths []string) (err error) { + title := "hcsshim::ExportLayer" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("exportFolderPath", exportFolderPath), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) + if err != nil { + return err + } + + err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} + +type LayerReader interface { + Next() (string, int64, *winio.FileBasicInfo, error) + Read(b []byte) (int, error) + Close() error +} + +// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer. +// The caller must have taken the SeBackupPrivilege privilege +// to call this and any methods on the resulting LayerReader. 
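+// A minimal caller sketch (review annotation; assumes the vendored go-winio
+// privilege helpers):
+//
+//	_ = winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege})
+//	r, err := NewLayerReader(ctx, layerPath, parentLayerPaths)
+//	// stream entries via r.Next() and r.Read() until io.EOF, then r.Close()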
+func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string) (_ LayerReader, err error) { + ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerReader") + defer func() { + if err != nil { + oc.SetSpanStatus(span, err) + span.End() + } + }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + exportPath, err := ioutil.TempDir("", "hcs") + if err != nil { + return nil, err + } + err = ExportLayer(ctx, path, exportPath, parentLayerPaths) + if err != nil { + os.RemoveAll(exportPath) + return nil, err + } + return &legacyLayerReaderWrapper{ + ctx: ctx, + s: span, + legacyLayerReader: newLegacyLayerReader(exportPath), + }, nil +} + +type legacyLayerReaderWrapper struct { + ctx context.Context + s *trace.Span + + *legacyLayerReader +} + +func (r *legacyLayerReaderWrapper) Close() (err error) { + defer r.s.End() + defer func() { oc.SetSpanStatus(r.s, err) }() + + err = r.legacyLayerReader.Close() + os.RemoveAll(r.root) + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go new file mode 100644 index 00000000000..8d213f5871a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go @@ -0,0 +1,50 @@ +package wclayer + +import ( + "context" + "syscall" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/log" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// GetLayerMountPath will look for a mounted layer with the given path and return +// the path at which that layer can be accessed. This path may be a volume path +// if the layer is a mounted read-write layer, otherwise it is expected to be the +// folder path at which the layer is stored. +func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) { + title := "hcsshim::GetLayerMountPath" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + var mountPathLength uintptr = 0 + + // Call the procedure itself. + log.G(ctx).Debug("Calling proc (1)") + err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) + if err != nil { + return "", hcserror.New(err, title, "(first call)") + } + + // Allocate a mount path of the returned length. 
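+ // NOTE (review annotation, not upstream code): this is the Win32 two-call
+ // idiom: the first call reported the required length in mountPathLength;
+ // the second call below fills the buffer allocated at that length.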
+ if mountPathLength == 0 { + return "", nil + } + mountPathp := make([]uint16, mountPathLength) + mountPathp[0] = 0 + + // Call the procedure again + log.G(ctx).Debug("Calling proc (2)") + err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) + if err != nil { + return "", hcserror.New(err, title, "(second call)") + } + + mountPath := syscall.UTF16ToString(mountPathp[0:]) + span.AddAttributes(trace.StringAttribute("mountPath", mountPath)) + return mountPath, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go new file mode 100644 index 00000000000..ae1fff84036 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go @@ -0,0 +1,29 @@ +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// GetSharedBaseImages will enumerate the images stored in the common central +// image store and return descriptive info about those images for the purpose +// of registering them with the graphdriver, graph, and tagstore. +func GetSharedBaseImages(ctx context.Context) (_ string, err error) { + title := "hcsshim::GetSharedBaseImages" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + + var buffer *uint16 + err = getBaseImages(&buffer) + if err != nil { + return "", hcserror.New(err, title, "") + } + imageData := interop.ConvertAndFreeCoTaskMemString(buffer) + span.AddAttributes(trace.StringAttribute("imageData", imageData)) + return imageData, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go new file mode 100644 index 00000000000..4b282fef9db --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go @@ -0,0 +1,26 @@ +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// GrantVmAccess adds access to a file for a given VM +func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error) { + title := "hcsshim::GrantVmAccess" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("vm-id", vmid), + trace.StringAttribute("path", filepath)) + + err = grantVmAccess(vmid, filepath) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go new file mode 100644 index 00000000000..687550f0be3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go @@ -0,0 +1,166 @@ +package wclayer + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/safefile" + "go.opencensus.io/trace" +) + +// ImportLayer will take the contents of the folder at importFolderPath and import +// that into a layer 
with the id layerId. Note that in order to correctly populate + the layer and interpret the transport format, all parent layers must already + be present on the system at the paths provided in parentLayerPaths. +func ImportLayer(ctx context.Context, path string, importFolderPath string, parentLayerPaths []string) (err error) { + title := "hcsshim::ImportLayer" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("importFolderPath", importFolderPath), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) + if err != nil { + return err + } + + err = importLayer(&stdDriverInfo, path, importFolderPath, layers) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} + +// LayerWriter is an interface that supports writing a new container image layer. +type LayerWriter interface { + // Add adds a file to the layer with given metadata. + Add(name string, fileInfo *winio.FileBasicInfo) error + // AddLink adds a hard link to the layer. The target must already have been added. + AddLink(name string, target string) error + // Remove removes a file that was present in a parent layer from the layer. + Remove(name string) error + // Write writes data to the current file. The data must be in the format of a Win32 + // backup stream. + Write(b []byte) (int, error) + // Close finishes the layer writing process and releases any resources. + Close() error +} + +type legacyLayerWriterWrapper struct { + ctx context.Context + s *trace.Span + + *legacyLayerWriter + path string + parentLayerPaths []string +} + +func (r *legacyLayerWriterWrapper) Close() (err error) { + defer r.s.End() + defer func() { oc.SetSpanStatus(r.s, err) }() + defer os.RemoveAll(r.root.Name()) + defer r.legacyLayerWriter.CloseRoots() + + err = r.legacyLayerWriter.Close() + if err != nil { + return err + } + + if err = ImportLayer(r.ctx, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil { + return err + } + for _, name := range r.Tombstones { + if err = safefile.RemoveRelative(name, r.destRoot); err != nil && !os.IsNotExist(err) { + return err + } + } + // Add any hard links that were collected. + for _, lnk := range r.PendingLinks { + if err = safefile.RemoveRelative(lnk.Path, r.destRoot); err != nil && !os.IsNotExist(err) { + return err + } + if err = safefile.LinkRelative(lnk.Target, lnk.TargetRoot, lnk.Path, r.destRoot); err != nil { + return err + } + } + + // reapplyDirectoryTimes must be called AFTER we are done with Tombstone + // deletion and hard link creation. This is because Tombstone deletion and hard link + // creation update the directory last-write timestamps, which changes the + // timestamps added by the `Add` call. Some container applications depend on the + // correctness of these timestamps and so we should change the timestamps back to + // the original value (i.e. the value provided in the Add call) after this + // processing is done. + err = reapplyDirectoryTimes(r.destRoot, r.changedDi) + if err != nil { + return err + } + + // Prepare the utility VM for use if one is present in the layer.
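+ // NOTE (review annotation, not upstream code): EnsureNotReparsePointRelative
+ // rejects a symlink or junction planted at "UtilityVM", so the following
+ // ProcessUtilityVMImage call cannot be redirected outside the layer root.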
+ if r.HasUtilityVM { + err := safefile.EnsureNotReparsePointRelative("UtilityVM", r.destRoot) + if err != nil { + return err + } + err = ProcessUtilityVMImage(r.ctx, filepath.Join(r.destRoot.Name(), "UtilityVM")) + if err != nil { + return err + } + } + return nil +} + +// NewLayerWriter returns a new layer writer for creating a layer on disk. +// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges +// to call this and any methods on the resulting LayerWriter. +func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string) (_ LayerWriter, err error) { + ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerWriter") + defer func() { + if err != nil { + oc.SetSpanStatus(span, err) + span.End() + } + }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + if len(parentLayerPaths) == 0 { + // This is a base layer. It gets imported differently. + f, err := safefile.OpenRoot(path) + if err != nil { + return nil, err + } + return &baseLayerWriter{ + ctx: ctx, + s: span, + root: f, + }, nil + } + + importPath, err := ioutil.TempDir("", "hcs") + if err != nil { + return nil, err + } + w, err := newLegacyLayerWriter(importPath, parentLayerPaths, path) + if err != nil { + return nil, err + } + return &legacyLayerWriterWrapper{ + ctx: ctx, + s: span, + legacyLayerWriter: w, + path: importPath, + parentLayerPaths: parentLayerPaths, + }, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go new file mode 100644 index 00000000000..01e67233939 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go @@ -0,0 +1,28 @@ +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// LayerExists will return true if a layer with the given id exists and is known +// to the system. +func LayerExists(ctx context.Context, path string) (_ bool, err error) { + title := "hcsshim::LayerExists" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + // Call the procedure itself. + var exists uint32 + err = layerExists(&stdDriverInfo, path, &exists) + if err != nil { + return false, hcserror.New(err, title, "") + } + span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0)) + return exists != 0, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go new file mode 100644 index 00000000000..0ce34a30f86 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go @@ -0,0 +1,22 @@ +package wclayer + +import ( + "context" + "path/filepath" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// LayerID returns the layer ID of a layer on disk. 
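+// The ID is derived from the layer folder's base name alone, e.g. (review
+// annotation, illustrative path):
+//
+//	id, err := LayerID(ctx, `C:\layers\base`) // same as NameToGuid(ctx, "base")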
+func LayerID(ctx context.Context, path string) (_ guid.GUID, err error) { + title := "hcsshim::LayerID" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + _, file := filepath.Split(path) + return NameToGuid(ctx, file) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go new file mode 100644 index 00000000000..1ec893c6af7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go @@ -0,0 +1,97 @@ +package wclayer + +// This file contains utility functions to support storage (graph) related +// functionality. + +import ( + "context" + "syscall" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/sirupsen/logrus" +) + +/* To pass into syscall, we need a struct matching the following: +enum GraphDriverType +{ + DiffDriver, + FilterDriver +}; + +struct DriverInfo { + GraphDriverType Flavour; + LPCWSTR HomeDir; +}; +*/ + +type driverInfo struct { + Flavour int + HomeDirp *uint16 +} + +var ( + utf16EmptyString uint16 + stdDriverInfo = driverInfo{1, &utf16EmptyString} +) + +/* To pass into syscall, we need a struct matching the following: +typedef struct _WC_LAYER_DESCRIPTOR { + + // + // The ID of the layer + // + + GUID LayerId; + + // + // Additional flags + // + + union { + struct { + ULONG Reserved : 31; + ULONG Dirty : 1; // Created from sandbox as a result of snapshot + }; + ULONG Value; + } Flags; + + // + // Path to the layer root directory, null-terminated + // + + PCWSTR Path; + +} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR; +*/ +type WC_LAYER_DESCRIPTOR struct { + LayerId guid.GUID + Flags uint32 + Pathp *uint16 +} + +func layerPathsToDescriptors(ctx context.Context, parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { + // Array of descriptors that gets constructed. 
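+ // NOTE (review annotation, not upstream code): each parent path becomes one
+ // WC_LAYER_DESCRIPTOR whose LayerId comes from the folder name via LayerID
+ // and whose Pathp points at the UTF-16 encoding of the full path.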
+ var layers []WC_LAYER_DESCRIPTOR + + for i := 0; i < len(parentLayerPaths); i++ { + g, err := LayerID(ctx, parentLayerPaths[i]) + if err != nil { + logrus.WithError(err).Debug("Failed to convert name to guid") + return nil, err + } + + p, err := syscall.UTF16PtrFromString(parentLayerPaths[i]) + if err != nil { + logrus.WithError(err).Debug("Failed conversion of parentLayerPath to pointer") + return nil, err + } + + layers = append(layers, WC_LAYER_DESCRIPTOR{ + LayerId: g, + Flags: 0, + Pathp: p, + }) + } + + return layers, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go new file mode 100644 index 00000000000..b7f3064f26b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -0,0 +1,811 @@ +package wclayer + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/longpath" + "github.com/Microsoft/hcsshim/internal/safefile" + "github.com/Microsoft/hcsshim/internal/winapi" +) + +var errorIterationCanceled = errors.New("") + +var mutatedUtilityVMFiles = map[string]bool{ + `EFI\Microsoft\Boot\BCD`: true, + `EFI\Microsoft\Boot\BCD.LOG`: true, + `EFI\Microsoft\Boot\BCD.LOG1`: true, + `EFI\Microsoft\Boot\BCD.LOG2`: true, +} + +const ( + filesPath = `Files` + hivesPath = `Hives` + utilityVMPath = `UtilityVM` + utilityVMFilesPath = `UtilityVM\Files` +) + +func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) { + return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition) +} + +func hasPathPrefix(p, prefix string) bool { + return strings.HasPrefix(p, prefix) && len(p) > len(prefix) && p[len(prefix)] == '\\' +} + +type fileEntry struct { + path string + fi os.FileInfo + err error +} + +type legacyLayerReader struct { + root string + result chan *fileEntry + proceed chan bool + currentFile *os.File + backupReader *winio.BackupFileReader +} + +// newLegacyLayerReader returns a new LayerReader that can read the Windows +// container layer transport format from disk. +func newLegacyLayerReader(root string) *legacyLayerReader { + r := &legacyLayerReader{ + root: root, + result: make(chan *fileEntry), + proceed: make(chan bool), + } + go r.walk() + return r +} + +func readTombstones(path string) (map[string]([]string), error) { + tf, err := os.Open(filepath.Join(path, "tombstones.txt")) + if err != nil { + return nil, err + } + defer tf.Close() + s := bufio.NewScanner(tf) + if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { + return nil, errors.New("invalid tombstones file") + } + + ts := make(map[string]([]string)) + for s.Scan() { + t := filepath.Join(filesPath, s.Text()[1:]) // skip leading `\` + dir := filepath.Dir(t) + ts[dir] = append(ts[dir], t) + } + if err = s.Err(); err != nil { + return nil, err + } + + return ts, nil +} + +func (r *legacyLayerReader) walkUntilCancelled() error { + root, err := longpath.LongAbs(r.root) + if err != nil { + return err + } + + r.root = root + ts, err := readTombstones(r.root) + if err != nil { + return err + } + + err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048. 
+ // Handle failure from what may be a golang bug in the conversion of + // UTF16 to UTF8 in files which are left in the recycle bin. Os.Lstat + // which is called by filepath.Walk will fail when a filename contains + // unicode characters. Skip the recycle bin regardless which is goodness. + if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() { + return filepath.SkipDir + } + + if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") { + return nil + } + + r.result <- &fileEntry{path, info, nil} + if !<-r.proceed { + return errorIterationCanceled + } + + // List all the tombstones. + if info.IsDir() { + relPath, err := filepath.Rel(r.root, path) + if err != nil { + return err + } + if dts, ok := ts[relPath]; ok { + for _, t := range dts { + r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil} + if !<-r.proceed { + return errorIterationCanceled + } + } + } + } + return nil + }) + if err == errorIterationCanceled { + return nil + } + if err == nil { + return io.EOF + } + return err +} + +func (r *legacyLayerReader) walk() { + defer close(r.result) + if !<-r.proceed { + return + } + + err := r.walkUntilCancelled() + if err != nil { + for { + r.result <- &fileEntry{err: err} + if !<-r.proceed { + return + } + } + } +} + +func (r *legacyLayerReader) reset() { + if r.backupReader != nil { + r.backupReader.Close() + r.backupReader = nil + } + if r.currentFile != nil { + r.currentFile.Close() + r.currentFile = nil + } +} + +func findBackupStreamSize(r io.Reader) (int64, error) { + br := winio.NewBackupStreamReader(r) + for { + hdr, err := br.Next() + if err != nil { + if err == io.EOF { + err = nil + } + return 0, err + } + if hdr.Id == winio.BackupData { + return hdr.Size, nil + } + } +} + +func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) { + r.reset() + r.proceed <- true + fe := <-r.result + if fe == nil { + err = errors.New("LegacyLayerReader closed") + return + } + if fe.err != nil { + err = fe.err + return + } + + path, err = filepath.Rel(r.root, fe.path) + if err != nil { + return + } + + if fe.fi == nil { + // This is a tombstone. Return a nil fileInfo. + return + } + + if fe.fi.IsDir() && hasPathPrefix(path, filesPath) { + fe.path += ".$wcidirs$" + } + + f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING) + if err != nil { + return + } + defer func() { + if f != nil { + f.Close() + } + }() + + fileInfo, err = winio.GetFileBasicInfo(f) + if err != nil { + return + } + + if !hasPathPrefix(path, filesPath) { + size = fe.fi.Size() + r.backupReader = winio.NewBackupFileReader(f, false) + if path == hivesPath || path == filesPath { + // The Hives directory has a non-deterministic file time because of the + // nature of the import process. Use the times from System_Delta. + var g *os.File + g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`)) + if err != nil { + return + } + attr := fileInfo.FileAttributes + fileInfo, err = winio.GetFileBasicInfo(g) + g.Close() + if err != nil { + return + } + fileInfo.FileAttributes = attr + } + + // The creation time and access time get reset for files outside of the Files path. + fileInfo.CreationTime = fileInfo.LastWriteTime + fileInfo.LastAccessTime = fileInfo.LastWriteTime + + } else { + // The file attributes are written before the backup stream. 
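// Layout sketch, inferred from the reads below and from the matching
// binary.Write in legacyLayerWriter.Add further down: each stream under
// Files\ is stored with a 4-byte little-endian attribute prefix,
//
//	[uint32 FileAttributes][BackupRead stream bytes ...]
//
// which is why the reader records beginning = 4 and seeks back there before
// serving data.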
+ var attr uint32
+ err = binary.Read(f, binary.LittleEndian, &attr)
+ if err != nil {
+ return
+ }
+ fileInfo.FileAttributes = attr
+ beginning := int64(4)
+
+ // Find the accurate file size.
+ if !fe.fi.IsDir() {
+ size, err = findBackupStreamSize(f)
+ if err != nil {
+ err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err}
+ return
+ }
+ }
+
+ // Return back to the beginning of the backup stream.
+ _, err = f.Seek(beginning, 0)
+ if err != nil {
+ return
+ }
+ }
+
+ r.currentFile = f
+ f = nil
+ return
+}
+
+func (r *legacyLayerReader) Read(b []byte) (int, error) {
+ if r.backupReader == nil {
+ if r.currentFile == nil {
+ return 0, io.EOF
+ }
+ return r.currentFile.Read(b)
+ }
+ return r.backupReader.Read(b)
+}
+
+func (r *legacyLayerReader) Seek(offset int64, whence int) (int64, error) {
+ if r.backupReader == nil {
+ if r.currentFile == nil {
+ return 0, errors.New("no current file")
+ }
+ return r.currentFile.Seek(offset, whence)
+ }
+ return 0, errors.New("seek not supported on this stream")
+}
+
+func (r *legacyLayerReader) Close() error {
+ r.proceed <- false
+ <-r.result
+ r.reset()
+ return nil
+}
+
+type pendingLink struct {
+ Path, Target string
+ TargetRoot *os.File
+}
+
+type pendingDir struct {
+ Path string
+ Root *os.File
+}
+
+type legacyLayerWriter struct {
+ root *os.File
+ destRoot *os.File
+ parentRoots []*os.File
+ currentFile *os.File
+ bufWriter *bufio.Writer
+ currentFileName string
+ currentFileRoot *os.File
+ backupWriter *winio.BackupFileWriter
+ Tombstones []string
+ HasUtilityVM bool
+ changedDi []dirInfo
+ addedFiles map[string]bool
+ PendingLinks []pendingLink
+ pendingDirs []pendingDir
+ currentIsDir bool
+}
+
+// newLegacyLayerWriter returns a LayerWriter that can write the container layer
+// transport format to disk.
+func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) {
+ w = &legacyLayerWriter{
+ addedFiles: make(map[string]bool),
+ }
+ defer func() {
+ if err != nil {
+ w.CloseRoots()
+ w = nil
+ }
+ }()
+ w.root, err = safefile.OpenRoot(root)
+ if err != nil {
+ return
+ }
+ w.destRoot, err = safefile.OpenRoot(destRoot)
+ if err != nil {
+ return
+ }
+ for _, r := range parentRoots {
+ f, err := safefile.OpenRoot(r)
+ if err != nil {
+ return w, err
+ }
+ w.parentRoots = append(w.parentRoots, f)
+ }
+ w.bufWriter = bufio.NewWriterSize(ioutil.Discard, 65536)
+ return
+}
+
+func (w *legacyLayerWriter) CloseRoots() {
+ if w.root != nil {
+ w.root.Close()
+ w.root = nil
+ }
+ if w.destRoot != nil {
+ w.destRoot.Close()
+ w.destRoot = nil
+ }
+ for i := range w.parentRoots {
+ _ = w.parentRoots[i].Close()
+ }
+ w.parentRoots = nil
+}
+
+func (w *legacyLayerWriter) initUtilityVM() error {
+ if !w.HasUtilityVM {
+ err := safefile.MkdirRelative(utilityVMPath, w.destRoot)
+ if err != nil {
+ return err
+ }
+ // Server 2016 does not support multiple layers for the utility VM, so
+ // clone the utility VM from the parent layer into this layer. Use hard
+ // links to avoid unnecessary copying, since most of the files are
+ // immutable.
+ err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles) + if err != nil { + return fmt.Errorf("cloning the parent utility VM image failed: %s", err) + } + w.HasUtilityVM = true + } + return nil +} + +func (w *legacyLayerWriter) reset() error { + err := w.bufWriter.Flush() + if err != nil { + return err + } + w.bufWriter.Reset(ioutil.Discard) + if w.currentIsDir { + r := w.currentFile + br := winio.NewBackupStreamReader(r) + // Seek to the beginning of the backup stream, skipping the fileattrs + if _, err := r.Seek(4, io.SeekStart); err != nil { + return err + } + + for { + bhdr, err := br.Next() + if err == io.EOF { + // end of backupstream data + break + } + if err != nil { + return err + } + switch bhdr.Id { + case winio.BackupReparseData: + // The current file is a `.$wcidirs$` metadata file that + // describes a directory reparse point. Delete the placeholder + // directory to prevent future files being added into the + // destination of the reparse point during the ImportLayer call + if err := safefile.RemoveRelative(w.currentFileName, w.currentFileRoot); err != nil { + return err + } + w.pendingDirs = append(w.pendingDirs, pendingDir{Path: w.currentFileName, Root: w.currentFileRoot}) + default: + // ignore all other stream types, as we only care about directory reparse points + } + } + w.currentIsDir = false + } + if w.backupWriter != nil { + w.backupWriter.Close() + w.backupWriter = nil + } + if w.currentFile != nil { + w.currentFile.Close() + w.currentFile = nil + w.currentFileName = "" + w.currentFileRoot = nil + } + return nil +} + +// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata +func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) { + src, err := safefile.OpenRelative( + subPath, + srcRoot, + syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, + syscall.FILE_SHARE_READ, + winapi.FILE_OPEN, + winapi.FILE_OPEN_REPARSE_POINT) + if err != nil { + return nil, err + } + defer src.Close() + srcr := winio.NewBackupFileReader(src, true) + defer srcr.Close() + + fileInfo, err = winio.GetFileBasicInfo(src) + if err != nil { + return nil, err + } + + extraFlags := uint32(0) + if isDir { + extraFlags |= winapi.FILE_DIRECTORY_FILE + } + dest, err := safefile.OpenRelative( + subPath, + destRoot, + syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, + syscall.FILE_SHARE_READ, + winapi.FILE_CREATE, + extraFlags) + if err != nil { + return nil, err + } + defer dest.Close() + + err = winio.SetFileBasicInfo(dest, fileInfo) + if err != nil { + return nil, err + } + + destw := winio.NewBackupFileWriter(dest, true) + defer func() { + cerr := destw.Close() + if err == nil { + err = cerr + } + }() + + _, err = io.Copy(destw, srcr) + if err != nil { + return nil, err + } + + return fileInfo, nil +} + +// cloneTree clones a directory tree using hard links. It skips hard links for +// the file names in the provided map and just copies those files. 
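// A minimal, hedged sketch of the same walk-and-link pattern (srcDir, dstDir,
// mutated, and copyInstead are hypothetical; the full implementation below
// additionally handles reparse points, Windows file attributes, and directory
// timestamps, all against safefile-rooted handles):
//
//	err := filepath.Walk(srcDir, func(p string, fi os.FileInfo, err error) error {
//		if err != nil {
//			return err
//		}
//		rel, _ := filepath.Rel(srcDir, p)
//		if fi.IsDir() || mutated[rel] {
//			return copyInstead(p, filepath.Join(dstDir, rel)) // copy, don't link
//		}
//		return os.Link(p, filepath.Join(dstDir, rel)) // hard link the file data
//	})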
+func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles map[string]bool) error { + var di []dirInfo + err := safefile.EnsureNotReparsePointRelative(subPath, srcRoot) + if err != nil { + return err + } + err = filepath.Walk(filepath.Join(srcRoot.Name(), subPath), func(srcFilePath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + relPath, err := filepath.Rel(srcRoot.Name(), srcFilePath) + if err != nil { + return err + } + + fileAttributes := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes + // Directories, reparse points, and files that will be mutated during + // utility VM import must be copied. All other files can be hard linked. + isReparsePoint := fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 + // In go1.9, FileInfo.IsDir() returns false if the directory is also a symlink. + // See: https://github.com/golang/go/commit/1989921aef60c83e6f9127a8448fb5ede10e9acc + // Fixes the problem by checking syscall.FILE_ATTRIBUTE_DIRECTORY directly + isDir := fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 + + if isDir || isReparsePoint || mutatedFiles[relPath] { + fi, err := copyFileWithMetadata(srcRoot, destRoot, relPath, isDir) + if err != nil { + return err + } + if isDir { + di = append(di, dirInfo{path: relPath, fileInfo: *fi}) + } + } else { + err = safefile.LinkRelative(relPath, srcRoot, relPath, destRoot) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + return err + } + + return reapplyDirectoryTimes(destRoot, di) +} + +func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { + if err := w.reset(); err != nil { + return err + } + + if name == utilityVMPath { + return w.initUtilityVM() + } + + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + w.changedDi = append(w.changedDi, dirInfo{path: name, fileInfo: *fileInfo}) + } + + name = filepath.Clean(name) + if hasPathPrefix(name, utilityVMPath) { + if !w.HasUtilityVM { + return errors.New("missing UtilityVM directory") + } + if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath { + return errors.New("invalid UtilityVM layer") + } + createDisposition := uint32(winapi.FILE_OPEN) + if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + st, err := safefile.LstatRelative(name, w.destRoot) + if err != nil && !os.IsNotExist(err) { + return err + } + if st != nil { + // Delete the existing file/directory if it is not the same type as this directory. + existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes + if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { + if err = safefile.RemoveAllRelative(name, w.destRoot); err != nil { + return err + } + st = nil + } + } + if st == nil { + if err = safefile.MkdirRelative(name, w.destRoot); err != nil { + return err + } + } + } else { + // Overwrite any existing hard link. 
+ err := safefile.RemoveRelative(name, w.destRoot)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ createDisposition = winapi.FILE_CREATE
+ }
+
+ f, err := safefile.OpenRelative(
+ name,
+ w.destRoot,
+ syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY,
+ syscall.FILE_SHARE_READ,
+ createDisposition,
+ winapi.FILE_OPEN_REPARSE_POINT,
+ )
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if f != nil {
+ f.Close()
+ _ = safefile.RemoveRelative(name, w.destRoot)
+ }
+ }()
+
+ err = winio.SetFileBasicInfo(f, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.backupWriter = winio.NewBackupFileWriter(f, true)
+ w.bufWriter.Reset(w.backupWriter)
+ w.currentFile = f
+ w.currentFileName = name
+ w.currentFileRoot = w.destRoot
+ w.addedFiles[name] = true
+ f = nil
+ return nil
+ }
+
+ fname := name
+ if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
+ err := safefile.MkdirRelative(name, w.root)
+ if err != nil {
+ return err
+ }
+ fname += ".$wcidirs$"
+ w.currentIsDir = true
+ }
+
+ f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, winapi.FILE_CREATE, 0)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if f != nil {
+ f.Close()
+ _ = safefile.RemoveRelative(fname, w.root)
+ }
+ }()
+
+ strippedFi := *fileInfo
+ strippedFi.FileAttributes = 0
+ err = winio.SetFileBasicInfo(f, &strippedFi)
+ if err != nil {
+ return err
+ }
+
+ if hasPathPrefix(name, hivesPath) {
+ w.backupWriter = winio.NewBackupFileWriter(f, false)
+ w.bufWriter.Reset(w.backupWriter)
+ } else {
+ w.bufWriter.Reset(f)
+ // The file attributes are written before the stream.
+ err = binary.Write(w.bufWriter, binary.LittleEndian, uint32(fileInfo.FileAttributes))
+ if err != nil {
+ w.bufWriter.Reset(ioutil.Discard)
+ return err
+ }
+ }
+
+ w.currentFile = f
+ w.currentFileName = name
+ w.currentFileRoot = w.root
+ w.addedFiles[name] = true
+ f = nil
+ return nil
+}
+
+func (w *legacyLayerWriter) AddLink(name string, target string) error {
+ if err := w.reset(); err != nil {
+ return err
+ }
+
+ target = filepath.Clean(target)
+ var roots []*os.File
+ if hasPathPrefix(target, filesPath) {
+ // Look for cross-layer hard link targets in the parent layers, since
+ // nothing is in the destination path yet.
+ roots = w.parentRoots
+ } else if hasPathPrefix(target, utilityVMFilesPath) {
+ // Since the utility VM is fully cloned into the destination path
+ // already, look for cross-layer hard link targets directly in the
+ // destination path.
+ roots = []*os.File{w.destRoot}
+ }
+
+ if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) {
+ return errors.New("invalid hard link in layer")
+ }
+
+ // First, try to find the target of the link in a previously added file. If
+ // that fails, search in parent layers.
+ var selectedRoot *os.File
+ if _, ok := w.addedFiles[target]; ok {
+ selectedRoot = w.destRoot
+ } else {
+ for _, r := range roots {
+ if _, err := safefile.LstatRelative(target, r); err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ } else {
+ selectedRoot = r
+ break
+ }
+ }
+ if selectedRoot == nil {
+ return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target)
+ }
+ }
+
+ // The link can't be written until after the ImportLayer call.
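	// A hedged sketch of the follow-up step a caller performs once
	// ImportLayer has returned (destDir is hypothetical; the vendored
	// wrapper code resolves the links with its own safe-file helpers):
	//
	//	for _, l := range w.PendingLinks {
	//		old := filepath.Join(l.TargetRoot.Name(), l.Target)
	//		if err := os.Link(old, filepath.Join(destDir, l.Path)); err != nil {
	//			return err
	//		}
	//	}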
+ w.PendingLinks = append(w.PendingLinks, pendingLink{
+ Path: name,
+ Target: target,
+ TargetRoot: selectedRoot,
+ })
+ w.addedFiles[name] = true
+ return nil
+}
+
+func (w *legacyLayerWriter) Remove(name string) error {
+ name = filepath.Clean(name)
+ if hasPathPrefix(name, filesPath) {
+ w.Tombstones = append(w.Tombstones, name)
+ } else if hasPathPrefix(name, utilityVMFilesPath) {
+ err := w.initUtilityVM()
+ if err != nil {
+ return err
+ }
+ // Make sure the path exists; os.RemoveAll will not fail if the file is
+ // already gone, and this needs to be a fatal error for diagnostics
+ // purposes.
+ if _, err := safefile.LstatRelative(name, w.destRoot); err != nil {
+ return err
+ }
+ err = safefile.RemoveAllRelative(name, w.destRoot)
+ if err != nil {
+ return err
+ }
+ } else {
+ return fmt.Errorf("invalid tombstone %s", name)
+ }
+
+ return nil
+}
+
+func (w *legacyLayerWriter) Write(b []byte) (int, error) {
+ if w.backupWriter == nil && w.currentFile == nil {
+ return 0, errors.New("closed")
+ }
+ return w.bufWriter.Write(b)
+}
+
+func (w *legacyLayerWriter) Close() error {
+ if err := w.reset(); err != nil {
+ return err
+ }
+ if err := safefile.RemoveRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ for _, pd := range w.pendingDirs {
+ err := safefile.MkdirRelative(pd.Path, pd.Root)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
new file mode 100644
index 00000000000..09950297cef
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go
@@ -0,0 +1,29 @@
+package wclayer
+
+import (
+ "context"
+
+ "github.com/Microsoft/go-winio/pkg/guid"
+ "github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
+)
+
+// NameToGuid converts the given string into a GUID using the algorithm in the
+// Host Compute Service, ensuring GUIDs generated with the same string are common
+// across all clients.
+func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) {
+ title := "hcsshim::NameToGuid"
+ ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
+ defer span.End()
+ defer func() { oc.SetSpanStatus(span, err) }()
+ span.AddAttributes(trace.StringAttribute("objectName", name))
+
+ var id guid.GUID
+ err = nameToGuid(name, &id)
+ if err != nil {
+ return guid.GUID{}, hcserror.New(err, title, "")
+ }
+ span.AddAttributes(trace.StringAttribute("guid", id.String()))
+ return id, nil
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
new file mode 100644
index 00000000000..90129faefbb
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go
@@ -0,0 +1,44 @@
+package wclayer
+
+import (
+ "context"
+ "strings"
+ "sync"
+
+ "github.com/Microsoft/hcsshim/internal/hcserror"
+ "github.com/Microsoft/hcsshim/internal/oc"
+ "go.opencensus.io/trace"
+)
+
+var prepareLayerLock sync.Mutex
+
+// PrepareLayer finds a mounted read-write layer matching path and enables the
+// filesystem filter for use on that layer. This requires the paths to all
+// parent layers, and is necessary in order to view or interact with the layer
+// as an actual filesystem (reading and writing files, creating directories, etc).
+// Disabling the filter must be done via UnprepareLayer. +func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { + title := "hcsshim::PrepareLayer" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("path", path), + trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) + + // Generate layer descriptors + layers, err := layerPathsToDescriptors(ctx, parentLayerPaths) + if err != nil { + return err + } + + // This lock is a temporary workaround for a Windows bug. Only allowing one + // call to prepareLayer at a time vastly reduces the chance of a timeout. + prepareLayerLock.Lock() + defer prepareLayerLock.Unlock() + err = prepareLayer(&stdDriverInfo, path, layers) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go new file mode 100644 index 00000000000..30bcdff5f55 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go @@ -0,0 +1,41 @@ +package wclayer + +import ( + "context" + "os" + + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// ProcessBaseLayer post-processes a base layer that has had its files extracted. +// The files should have been extracted to \Files. +func ProcessBaseLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::ProcessBaseLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = processBaseImage(path) + if err != nil { + return &os.PathError{Op: title, Path: path, Err: err} + } + return nil +} + +// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted. +// The files should have been extracted to \Files. +func ProcessUtilityVMImage(ctx context.Context, path string) (err error) { + title := "hcsshim::ProcessUtilityVMImage" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = processUtilityImage(path) + if err != nil { + return &os.PathError{Op: title, Path: path, Err: err} + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go new file mode 100644 index 00000000000..71b130c525f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go @@ -0,0 +1,25 @@ +package wclayer + +import ( + "context" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +// UnprepareLayer disables the filesystem filter for the read-write layer with +// the given id. 
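// A hedged pairing sketch (ctx, path, and parents are hypothetical), showing
// the defer idiom that keeps the filter from being left enabled on an error
// path:
//
//	if err := wclayer.PrepareLayer(ctx, path, parents); err != nil {
//		return err
//	}
//	defer func() {
//		_ = wclayer.UnprepareLayer(ctx, path) // always disable the filter again
//	}()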
+func UnprepareLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::UnprepareLayer" + ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + err = unprepareLayer(&stdDriverInfo, path) + if err != nil { + return hcserror.New(err, title, "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go new file mode 100644 index 00000000000..9b1e06d50c5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go @@ -0,0 +1,35 @@ +// Package wclayer provides bindings to HCS's legacy layer management API and +// provides a higher level interface around these calls for container layer +// management. +package wclayer + +import "github.com/Microsoft/go-winio/pkg/guid" + +//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go wclayer.go + +//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer? +//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer? +//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer? +//sys createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer? +//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize? +//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer? +//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer? +//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer? +//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath? +//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages? +//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer? +//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists? +//sys nameToGuid(name string, guid *_guid) (hr error) = vmcompute.NameToGuid? +//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer? +//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer? +//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage? +//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage? + +//sys grantVmAccess(vmid string, filepath string) (hr error) = vmcompute.GrantVmAccess? 
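//
// The trailing "?" on the vmcompute directives above asks mksyscall_windows.go
// to generate wrappers that call proc.Find() first and return the lookup error
// when the export is absent, instead of panicking at process start; this
// matters on hosts where vmcompute.dll or a given export is missing. The
// generated shape, mirroring the real output in zsyscall_windows.go below:
//
//	func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) {
//		if hr = procGrantVmAccess.Find(); hr != nil {
//			return // DLL or export not present on this host
//		}
//		r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2,
//			uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0)
//		if int32(r0) < 0 { // failing HRESULTs are negative
//			hr = syscall.Errno(r0)
//		}
//		return
//	}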
+ +//sys openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) [failretval != 0] = virtdisk.OpenVirtualDisk +//sys attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) [failretval != 0] = virtdisk.AttachVirtualDisk + +//sys getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) = GetDiskFreeSpaceExW + +type _guid = guid.GUID diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go new file mode 100644 index 00000000000..67f917f07e6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go @@ -0,0 +1,569 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package wclayer + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procActivateLayer = modvmcompute.NewProc("ActivateLayer") + procCopyLayer = modvmcompute.NewProc("CopyLayer") + procCreateLayer = modvmcompute.NewProc("CreateLayer") + procCreateSandboxLayer = modvmcompute.NewProc("CreateSandboxLayer") + procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize") + procDeactivateLayer = modvmcompute.NewProc("DeactivateLayer") + procDestroyLayer = modvmcompute.NewProc("DestroyLayer") + procExportLayer = modvmcompute.NewProc("ExportLayer") + procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath") + procGetBaseImages = modvmcompute.NewProc("GetBaseImages") + procImportLayer = modvmcompute.NewProc("ImportLayer") + procLayerExists = modvmcompute.NewProc("LayerExists") + procNameToGuid = modvmcompute.NewProc("NameToGuid") + procPrepareLayer = modvmcompute.NewProc("PrepareLayer") + procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer") + procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage") + procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage") + procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess") + procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") + procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") + procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") +) + +func activateLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _activateLayer(info, _p0) +} + +func _activateLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procActivateLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 
2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(srcId) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(dstId) + if hr != nil { + return + } + return _copyLayer(info, _p0, _p1, descriptors) +} + +func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + if hr = procCopyLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func createLayer(info *driverInfo, id string, parent string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(parent) + if hr != nil { + return + } + return _createLayer(info, _p0, _p1) +} + +func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) { + if hr = procCreateLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _createSandboxLayer(info, _p0, parent, descriptors) +} + +func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p1 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p1 = &descriptors[0] + } + if hr = procCreateSandboxLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _expandSandboxSize(info, _p0, size) +} + +func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { + if hr = procExpandSandboxSize.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func deactivateLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _deactivateLayer(info, _p0) +} + +func _deactivateLayer(info 
*driverInfo, id *uint16) (hr error) { + if hr = procDeactivateLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func destroyLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _destroyLayer(info, _p0) +} + +func _destroyLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procDestroyLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _exportLayer(info, _p0, _p1, descriptors) +} + +func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + if hr = procExportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _getLayerMountPath(info, _p0, length, buffer) +} + +func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *uint16) (hr error) { + if hr = procGetLayerMountPath.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func getBaseImages(buffer **uint16) (hr error) { + if hr = procGetBaseImages.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _importLayer(info, _p0, _p1, descriptors) +} + +func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p2 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p2 = &descriptors[0] + } + if hr = procImportLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, 
uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func layerExists(info *driverInfo, id string, exists *uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _layerExists(info, _p0, exists) +} + +func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) { + if hr = procLayerExists.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func nameToGuid(name string, guid *_guid) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(name) + if hr != nil { + return + } + return _nameToGuid(_p0, guid) +} + +func _nameToGuid(name *uint16, guid *_guid) (hr error) { + if hr = procNameToGuid.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _prepareLayer(info, _p0, descriptors) +} + +func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + var _p1 *WC_LAYER_DESCRIPTOR + if len(descriptors) > 0 { + _p1 = &descriptors[0] + } + if hr = procPrepareLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func unprepareLayer(info *driverInfo, id string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { + return + } + return _unprepareLayer(info, _p0) +} + +func _unprepareLayer(info *driverInfo, id *uint16) (hr error) { + if hr = procUnprepareLayer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func processBaseImage(path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _processBaseImage(_p0) +} + +func _processBaseImage(path *uint16) (hr error) { + if hr = procProcessBaseImage.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func processUtilityImage(path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _processUtilityImage(_p0) +} + +func _processUtilityImage(path *uint16) (hr error) { + if hr = 
procProcessUtilityImage.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func grantVmAccess(vmid string, filepath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(vmid) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(filepath) + if hr != nil { + return + } + return _grantVmAccess(_p0, _p1) +} + +func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) { + if hr = procGrantVmAccess.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle) +} + +func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped)) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(directoryName) + if err != nil { + return + } + return _getDiskFreeSpaceEx(_p0, freeBytesAvailableToCaller, totalNumberOfBytes, totalNumberOfFreeBytes) +} + +func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { + r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go new file mode 100644 index 00000000000..def9525417e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go @@ -0,0 +1,44 @@ +package winapi + +import ( + "unsafe" + + 
"golang.org/x/sys/windows" +) + +const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1 + +// CreatePseudoConsole creates a windows pseudo console. +func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error { + // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. + return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, 0, hpcon) +} + +// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`. +func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error { + // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. + return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size)))) +} + +// HRESULT WINAPI CreatePseudoConsole( +// _In_ COORD size, +// _In_ HANDLE hInput, +// _In_ HANDLE hOutput, +// _In_ DWORD dwFlags, +// _Out_ HPCON* phPC +// ); +// +//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole + +// void WINAPI ClosePseudoConsole( +// _In_ HPCON hPC +// ); +// +//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole + +// HRESULT WINAPI ResizePseudoConsole( +// _In_ HPCON hPC , +// _In_ COORD size +// ); +// +//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go new file mode 100644 index 00000000000..df28ea24216 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go @@ -0,0 +1,13 @@ +package winapi + +import "github.com/Microsoft/go-winio/pkg/guid" + +//sys CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_ID_List_SizeA +//sys CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error)= cfgmgr32.CM_Get_Device_ID_ListA +//sys CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) = cfgmgr32.CM_Locate_DevNodeW +//sys CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) = cfgmgr32.CM_Get_DevNode_PropertyW + +type DevPropKey struct { + Fmtid guid.GUID + Pid uint32 +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go new file mode 100644 index 00000000000..4e80ef68c92 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go @@ -0,0 +1,15 @@ +package winapi + +import "syscall" + +//sys RtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosError + +const ( + STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B + ERROR_NO_MORE_ITEMS = 0x103 + ERROR_MORE_DATA syscall.Errno = 234 +) + +func NTSuccess(status uint32) bool { + return status == 0 +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go new file mode 100644 index 00000000000..7ce52afd5e1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go @@ -0,0 +1,110 @@ +package winapi + +//sys NtCreateFile(handle 
*uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile +//sys NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile + +//sys NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) = ntdll.NtOpenDirectoryObject +//sys NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32)(status uint32) = ntdll.NtQueryDirectoryObject + +const ( + FileLinkInformationClass = 11 + FileDispositionInformationExClass = 64 + + FILE_READ_ATTRIBUTES = 0x0080 + FILE_WRITE_ATTRIBUTES = 0x0100 + DELETE = 0x10000 + + FILE_OPEN = 1 + FILE_CREATE = 2 + + FILE_LIST_DIRECTORY = 0x00000001 + FILE_DIRECTORY_FILE = 0x00000001 + FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 + FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 + FILE_OPEN_REPARSE_POINT = 0x00200000 + + FILE_DISPOSITION_DELETE = 0x00000001 + + OBJ_DONT_REPARSE = 0x1000 + + STATUS_MORE_ENTRIES = 0x105 + STATUS_NO_MORE_ENTRIES = 0x8000001a +) + +// Select entries from FILE_INFO_BY_HANDLE_CLASS. +// +// C declaration: +// typedef enum _FILE_INFO_BY_HANDLE_CLASS { +// FileBasicInfo, +// FileStandardInfo, +// FileNameInfo, +// FileRenameInfo, +// FileDispositionInfo, +// FileAllocationInfo, +// FileEndOfFileInfo, +// FileStreamInfo, +// FileCompressionInfo, +// FileAttributeTagInfo, +// FileIdBothDirectoryInfo, +// FileIdBothDirectoryRestartInfo, +// FileIoPriorityHintInfo, +// FileRemoteProtocolInfo, +// FileFullDirectoryInfo, +// FileFullDirectoryRestartInfo, +// FileStorageInfo, +// FileAlignmentInfo, +// FileIdInfo, +// FileIdExtdDirectoryInfo, +// FileIdExtdDirectoryRestartInfo, +// FileDispositionInfoEx, +// FileRenameInfoEx, +// FileCaseSensitiveInfo, +// FileNormalizedNameInfo, +// MaximumFileInfoByHandleClass +// } FILE_INFO_BY_HANDLE_CLASS, *PFILE_INFO_BY_HANDLE_CLASS; +// +// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ne-minwinbase-file_info_by_handle_class +const ( + FileIdInfo = 18 +) + +type FileDispositionInformationEx struct { + Flags uintptr +} + +type IOStatusBlock struct { + Status, Information uintptr +} + +type ObjectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *UnicodeString + Attributes uintptr + SecurityDescriptor uintptr + SecurityQoS uintptr +} + +type ObjectDirectoryInformation struct { + Name UnicodeString + TypeName UnicodeString +} + +type FileLinkInformation struct { + ReplaceIfExists bool + RootDirectory uintptr + FileNameLength uint32 + FileName [1]uint16 +} + +// C declaration: +// typedef struct _FILE_ID_INFO { +// ULONGLONG VolumeSerialNumber; +// FILE_ID_128 FileId; +// } FILE_ID_INFO, *PFILE_ID_INFO; +// +// Documentation: https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_info +type FILE_ID_INFO struct { + VolumeSerialNumber uint64 + FileID [16]byte +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go new file mode 100644 index 00000000000..4e609cbf1cd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go @@ -0,0 +1,3 @@ +package winapi + +//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, 
overlapped **windows.Overlapped, timeout uint32) (err error) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go new file mode 100644 index 00000000000..ba12b1ad92e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go @@ -0,0 +1,215 @@ +package winapi + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +// Messages that can be received from an assigned io completion port. +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port +const ( + JOB_OBJECT_MSG_END_OF_JOB_TIME uint32 = 1 + JOB_OBJECT_MSG_END_OF_PROCESS_TIME uint32 = 2 + JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT uint32 = 3 + JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO uint32 = 4 + JOB_OBJECT_MSG_NEW_PROCESS uint32 = 6 + JOB_OBJECT_MSG_EXIT_PROCESS uint32 = 7 + JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS uint32 = 8 + JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT uint32 = 9 + JOB_OBJECT_MSG_JOB_MEMORY_LIMIT uint32 = 10 + JOB_OBJECT_MSG_NOTIFICATION_LIMIT uint32 = 11 +) + +// Access rights for creating or opening job objects. +// +// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights +const JOB_OBJECT_ALL_ACCESS = 0x1F001F + +// IO limit flags +// +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information +const JOB_OBJECT_IO_RATE_CONTROL_ENABLE = 0x1 + +const JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE uint32 = 0x1 + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information +const ( + JOB_OBJECT_CPU_RATE_CONTROL_ENABLE uint32 = 1 << iota + JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED + JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP + JOB_OBJECT_CPU_RATE_CONTROL_NOTIFY + JOB_OBJECT_CPU_RATE_CONTROL_MIN_MAX_RATE +) + +// JobObjectInformationClass values. 
Used for a call to QueryInformationJobObject +// +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-queryinformationjobobject +const ( + JobObjectBasicAccountingInformation uint32 = 1 + JobObjectBasicProcessIdList uint32 = 3 + JobObjectBasicAndIoAccountingInformation uint32 = 8 + JobObjectLimitViolationInformation uint32 = 13 + JobObjectMemoryUsageInformation uint32 = 28 + JobObjectNotificationLimitInformation2 uint32 = 33 + JobObjectIoAttribution uint32 = 42 +) + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_limit_information +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information +type JOBOBJECT_CPU_RATE_CONTROL_INFORMATION struct { + ControlFlags uint32 + Value uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information +type JOBOBJECT_IO_RATE_CONTROL_INFORMATION struct { + MaxIops int64 + MaxBandwidth int64 + ReservationIops int64 + BaseIOSize uint32 + VolumeName string + ControlFlags uint32 +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_process_id_list +type JOBOBJECT_BASIC_PROCESS_ID_LIST struct { + NumberOfAssignedProcesses uint32 + NumberOfProcessIdsInList uint32 + ProcessIdList [1]uintptr +} + +// AllPids returns all the process Ids in the job object. +func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr { + return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList] +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information +type JOBOBJECT_BASIC_ACCOUNTING_INFORMATION struct { + TotalUserTime int64 + TotalKernelTime int64 + ThisPeriodTotalUserTime int64 + ThisPeriodTotalKernelTime int64 + TotalPageFaultCount uint32 + TotalProcesses uint32 + ActiveProcesses uint32 + TotalTerminateProcesses uint32 +} + +//https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_and_io_accounting_information +type JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION struct { + BasicInfo JOBOBJECT_BASIC_ACCOUNTING_INFORMATION + IoInfo windows.IO_COUNTERS +} + +// typedef struct _JOBOBJECT_MEMORY_USAGE_INFORMATION { +// ULONG64 JobMemory; +// ULONG64 PeakJobMemoryUsed; +// } JOBOBJECT_MEMORY_USAGE_INFORMATION, *PJOBOBJECT_MEMORY_USAGE_INFORMATION; +// +type JOBOBJECT_MEMORY_USAGE_INFORMATION struct { + JobMemory uint64 + PeakJobMemoryUsed uint64 +} + +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_STATS { +// ULONG_PTR IoCount; +// ULONGLONG TotalNonOverlappedQueueTime; +// ULONGLONG TotalNonOverlappedServiceTime; +// ULONGLONG TotalSize; +// } JOBOBJECT_IO_ATTRIBUTION_STATS, *PJOBOBJECT_IO_ATTRIBUTION_STATS; +// +type JOBOBJECT_IO_ATTRIBUTION_STATS struct { + IoCount uintptr + TotalNonOverlappedQueueTime uint64 + TotalNonOverlappedServiceTime uint64 + TotalSize uint64 +} + +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_INFORMATION { +// ULONG ControlFlags; +// JOBOBJECT_IO_ATTRIBUTION_STATS ReadStats; +// JOBOBJECT_IO_ATTRIBUTION_STATS WriteStats; +// } JOBOBJECT_IO_ATTRIBUTION_INFORMATION, *PJOBOBJECT_IO_ATTRIBUTION_INFORMATION; +// +type JOBOBJECT_IO_ATTRIBUTION_INFORMATION 
struct { + ControlFlags uint32 + ReadStats JOBOBJECT_IO_ATTRIBUTION_STATS + WriteStats JOBOBJECT_IO_ATTRIBUTION_STATS +} + +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port +type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { + CompletionKey windows.Handle + CompletionPort windows.Handle +} + +// BOOL IsProcessInJob( +// HANDLE ProcessHandle, +// HANDLE JobHandle, +// PBOOL Result +// ); +// +//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob + +// BOOL QueryInformationJobObject( +// HANDLE hJob, +// JOBOBJECTINFOCLASS JobObjectInformationClass, +// LPVOID lpJobObjectInformation, +// DWORD cbJobObjectInformationLength, +// LPDWORD lpReturnLength +// ); +// +//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject + +// HANDLE OpenJobObjectW( +// DWORD dwDesiredAccess, +// BOOL bInheritHandle, +// LPCWSTR lpName +// ); +// +//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW + +// DWORD SetIoRateControlInformationJobObject( +// HANDLE hJob, +// JOBOBJECT_IO_RATE_CONTROL_INFORMATION *IoRateControlInfo +// ); +// +//sys SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) = kernel32.SetIoRateControlInformationJobObject + +// DWORD QueryIoRateControlInformationJobObject( +// HANDLE hJob, +// PCWSTR VolumeName, +// JOBOBJECT_IO_RATE_CONTROL_INFORMATION **InfoBlocks, +// ULONG *InfoBlockCount +// ); +//sys QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) = kernel32.QueryIoRateControlInformationJobObject + +// NTSTATUS +// NtOpenJobObject ( +// _Out_ PHANDLE JobHandle, +// _In_ ACCESS_MASK DesiredAccess, +// _In_ POBJECT_ATTRIBUTES ObjectAttributes +// ); +//sys NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtOpenJobObject + +// NTSTATUS +// NTAPI +// NtCreateJobObject ( +// _Out_ PHANDLE JobHandle, +// _In_ ACCESS_MASK DesiredAccess, +// _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes +// ); +//sys NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtCreateJobObject diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go new file mode 100644 index 00000000000..b6e7cfd4601 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/logon.go @@ -0,0 +1,30 @@ +package winapi + +// BOOL LogonUserA( +// LPCWSTR lpszUsername, +// LPCWSTR lpszDomain, +// LPCWSTR lpszPassword, +// DWORD dwLogonType, +// DWORD dwLogonProvider, +// PHANDLE phToken +// ); +// +//sys LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) = advapi32.LogonUserW + +// Logon types +const ( + LOGON32_LOGON_INTERACTIVE uint32 = 2 + LOGON32_LOGON_NETWORK uint32 = 3 + LOGON32_LOGON_BATCH uint32 = 4 + LOGON32_LOGON_SERVICE uint32 = 5 + LOGON32_LOGON_UNLOCK uint32 = 7 + LOGON32_LOGON_NETWORK_CLEARTEXT uint32 = 8 + 
LOGON32_LOGON_NEW_CREDENTIALS uint32 = 9 +) + +// Logon providers +const ( + LOGON32_PROVIDER_DEFAULT uint32 = 0 + LOGON32_PROVIDER_WINNT40 uint32 = 2 + LOGON32_PROVIDER_WINNT50 uint32 = 3 +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go new file mode 100644 index 00000000000..53f62948c90 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go @@ -0,0 +1,4 @@ +package winapi + +//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc +//sys LocalFree(ptr uintptr) = kernel32.LocalFree diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go new file mode 100644 index 00000000000..f37910024f7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/net.go @@ -0,0 +1,3 @@ +package winapi + +//sys SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) = iphlpapi.SetJobCompartmentId diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go new file mode 100644 index 00000000000..908920e8722 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go @@ -0,0 +1,11 @@ +package winapi + +// DWORD SearchPathW( +// LPCWSTR lpPath, +// LPCWSTR lpFileName, +// LPCWSTR lpExtension, +// DWORD nBufferLength, +// LPWSTR lpBuffer, +// LPWSTR *lpFilePart +// ); +//sys SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) = kernel32.SearchPathW diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go new file mode 100644 index 00000000000..37839435b93 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go @@ -0,0 +1,8 @@ +package winapi + +const PROCESS_ALL_ACCESS uint32 = 2097151 + +const ( + PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016 + PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go new file mode 100644 index 00000000000..ce79ac2cdb8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/processor.go @@ -0,0 +1,7 @@ +package winapi + +// Get count from all processor groups. 
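+// Passing ALL_PROCESSOR_GROUPS (declared below) to GetActiveProcessorCount
+// yields the count of active logical processors across every group rather
+// than only the calling thread's group; a minimal sketch, assuming the
+// binding below: n := GetActiveProcessorCount(ALL_PROCESSOR_GROUPS).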
+// https://docs.microsoft.com/en-us/windows/win32/procthread/processor-groups +const ALL_PROCESSOR_GROUPS = 0xFFFF + +//sys GetActiveProcessorCount(groupNumber uint16) (amount uint32) = kernel32.GetActiveProcessorCount diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go new file mode 100644 index 00000000000..327f57d7c29 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go @@ -0,0 +1,52 @@ +package winapi + +import "golang.org/x/sys/windows" + +const SystemProcessInformation = 5 + +const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004 + +// __kernel_entry NTSTATUS NtQuerySystemInformation( +// SYSTEM_INFORMATION_CLASS SystemInformationClass, +// PVOID SystemInformation, +// ULONG SystemInformationLength, +// PULONG ReturnLength +// ); +//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation + +type SYSTEM_PROCESS_INFORMATION struct { + NextEntryOffset uint32 // ULONG + NumberOfThreads uint32 // ULONG + WorkingSetPrivateSize int64 // LARGE_INTEGER + HardFaultCount uint32 // ULONG + NumberOfThreadsHighWatermark uint32 // ULONG + CycleTime uint64 // ULONGLONG + CreateTime int64 // LARGE_INTEGER + UserTime int64 // LARGE_INTEGER + KernelTime int64 // LARGE_INTEGER + ImageName UnicodeString // UNICODE_STRING + BasePriority int32 // KPRIORITY + UniqueProcessID windows.Handle // HANDLE + InheritedFromUniqueProcessID windows.Handle // HANDLE + HandleCount uint32 // ULONG + SessionID uint32 // ULONG + UniqueProcessKey *uint32 // ULONG_PTR + PeakVirtualSize uintptr // SIZE_T + VirtualSize uintptr // SIZE_T + PageFaultCount uint32 // ULONG + PeakWorkingSetSize uintptr // SIZE_T + WorkingSetSize uintptr // SIZE_T + QuotaPeakPagedPoolUsage uintptr // SIZE_T + QuotaPagedPoolUsage uintptr // SIZE_T + QuotaPeakNonPagedPoolUsage uintptr // SIZE_T + QuotaNonPagedPoolUsage uintptr // SIZE_T + PagefileUsage uintptr // SIZE_T + PeakPagefileUsage uintptr // SIZE_T + PrivatePageCount uintptr // SIZE_T + ReadOperationCount int64 // LARGE_INTEGER + WriteOperationCount int64 // LARGE_INTEGER + OtherOperationCount int64 // LARGE_INTEGER + ReadTransferCount int64 // LARGE_INTEGER + WriteTransferCount int64 // LARGE_INTEGER + OtherTransferCount int64 // LARGE_INTEGER +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go new file mode 100644 index 00000000000..4724713e3e4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go @@ -0,0 +1,12 @@ +package winapi + +// HANDLE CreateRemoteThread( +// HANDLE hProcess, +// LPSECURITY_ATTRIBUTES lpThreadAttributes, +// SIZE_T dwStackSize, +// LPTHREAD_START_ROUTINE lpStartAddress, +// LPVOID lpParameter, +// DWORD dwCreationFlags, +// LPDWORD lpThreadId +// ); +//sys CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) = kernel32.CreateRemoteThread diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go new file mode 100644 index 00000000000..859b753c246 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go @@ -0,0 +1,80 @@ +package winapi + +import ( + "errors" + "reflect" + 
"syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Uint16BufferToSlice wraps a uint16 pointer-and-length into a slice +// for easier interop with Go APIs +func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) { + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&result)) + hdr.Data = uintptr(unsafe.Pointer(buffer)) + hdr.Cap = bufferLength + hdr.Len = bufferLength + + return +} + +// UnicodeString corresponds to UNICODE_STRING win32 struct defined here +// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string +type UnicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer *uint16 +} + +// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. This value +// denotes the maximum number of wide chars a path can have. +const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767 + +//String converts a UnicodeString to a golang string +func (uni UnicodeString) String() string { + // UnicodeString is not guaranteed to be null terminated, therefore + // use the UnicodeString's Length field + return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) +} + +// NewUnicodeString allocates a new UnicodeString and copies `s` into +// the buffer of the new UnicodeString. +func NewUnicodeString(s string) (*UnicodeString, error) { + buf, err := windows.UTF16FromString(s) + if err != nil { + return nil, err + } + + if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH { + return nil, syscall.ENAMETOOLONG + } + + uni := &UnicodeString{ + // The length is in bytes and should not include the trailing null character. + Length: uint16((len(buf) - 1) * 2), + MaximumLength: uint16((len(buf) - 1) * 2), + Buffer: &buf[0], + } + return uni, nil +} + +// ConvertStringSetToSlice is a helper function used to convert the contents of +// `buf` into a string slice. `buf` contains a set of null terminated strings +// with an additional null at the end to indicate the end of the set. +func ConvertStringSetToSlice(buf []byte) ([]string, error) { + var results []string + prev := 0 + for i := range buf { + if buf[i] == 0 { + if prev == i { + // found two null characters in a row, return result + return results, nil + } + results = append(results, string(buf[prev:i])) + prev = i + 1 + } + } + return nil, errors.New("string set malformed: missing null terminator at end of buffer") +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go new file mode 100644 index 00000000000..1d4ba3c4f8e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go @@ -0,0 +1,5 @@ +// Package winapi contains various low-level bindings to Windows APIs. It can +// be thought of as an extension to golang.org/x/sys/windows. 
+package winapi + +//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go new file mode 100644 index 00000000000..4eb64b4c0c4 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -0,0 +1,360 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package winapi + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") + + procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") + procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") + procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") + procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") + procSearchPathW = modkernel32.NewProc("SearchPathW") + procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procIsProcessInJob = modkernel32.NewProc("IsProcessInJob") + procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") + procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") + procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject") + procQueryIoRateControlInformationJobObject = modkernel32.NewProc("QueryIoRateControlInformationJobObject") + procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") + procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") + procLogonUserW = modadvapi32.NewProc("LogonUserW") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") + procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") + procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") + procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW") + procNtCreateFile = modntdll.NewProc("NtCreateFile") + procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") + procNtOpenDirectoryObject = modntdll.NewProc("NtOpenDirectoryObject") + procNtQueryDirectoryObject = modntdll.NewProc("NtQueryDirectoryObject") + procRtlNtStatusToDosError = 
modntdll.NewProc("RtlNtStatusToDosError") +) + +func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) { + r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func ClosePseudoConsole(hpc windows.Handle) { + syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0) + return +} + +func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { + r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + status = uint32(r0) + return +} + +func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { + r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) + if r0 != 0 { + win32Err = syscall.Errno(r0) + } + return +} + +func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) + size = uint32(r0) + if size == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0) + handle = windows.Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) { + r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryInformationJobObject(jobHandle windows.Handle, infoClass 
uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName))) + handle = windows.Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0) + ret = uint32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0) + ret = uint32(r0) + if ret == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + status = uint32(r0) + return +} + +func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + status = uint32(r0) + return +} + +func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) { + r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LocalAlloc(flags uint32, size int) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) + ptr = uintptr(r0) + return +} + +func LocalFree(ptr uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) + return +} + +func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { + r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + amount = uint32(r0) + return +} + +func CMGetDeviceIDListSize(pulLen *uint32, pszFilter 
*byte, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(pDeviceID) + if hr != nil { + return + } + return _CMLocateDevNode(pdnDevInst, _p0, uFlags) +} + +func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { + r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) + status = uint32(r0) + return +} + +func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) + status = uint32(r0) + return +} + +func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa))) + status = uint32(r0) + return +} + +func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32) (status uint32) { + var _p0 uint32 + if singleEntry { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if restartScan { + _p1 = 1 + } else { + _p1 = 0 + } + r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 
7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0) + status = uint32(r0) + return +} + +func RtlNtStatusToDosError(status uint32) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/layer.go b/vendor/github.com/Microsoft/hcsshim/layer.go new file mode 100644 index 00000000000..8916163706c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/layer.go @@ -0,0 +1,107 @@ +package hcsshim + +import ( + "context" + "crypto/sha1" + "path/filepath" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/wclayer" +) + +func layerPath(info *DriverInfo, id string) string { + return filepath.Join(info.HomeDir, id) +} + +func ActivateLayer(info DriverInfo, id string) error { + return wclayer.ActivateLayer(context.Background(), layerPath(&info, id)) +} +func CreateLayer(info DriverInfo, id, parent string) error { + return wclayer.CreateLayer(context.Background(), layerPath(&info, id), parent) +} + +// New clients should use CreateScratchLayer instead. Kept in to preserve API compatibility. +func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { + return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) +} +func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { + return wclayer.CreateScratchLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) +} +func DeactivateLayer(info DriverInfo, id string) error { + return wclayer.DeactivateLayer(context.Background(), layerPath(&info, id)) +} +func DestroyLayer(info DriverInfo, id string) error { + return wclayer.DestroyLayer(context.Background(), layerPath(&info, id)) +} + +// New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility. 
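+// Both entry points forward to wclayer.ExpandScratchSize with the path built
+// by layerPath(&info, layerId), so their behavior is identical.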
+func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error { + return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) +} +func ExpandScratchSize(info DriverInfo, layerId string, size uint64) error { + return wclayer.ExpandScratchSize(context.Background(), layerPath(&info, layerId), size) +} +func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error { + return wclayer.ExportLayer(context.Background(), layerPath(&info, layerId), exportFolderPath, parentLayerPaths) +} +func GetLayerMountPath(info DriverInfo, id string) (string, error) { + return wclayer.GetLayerMountPath(context.Background(), layerPath(&info, id)) +} +func GetSharedBaseImages() (imageData string, err error) { + return wclayer.GetSharedBaseImages(context.Background()) +} +func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error { + return wclayer.ImportLayer(context.Background(), layerPath(&info, layerID), importFolderPath, parentLayerPaths) +} +func LayerExists(info DriverInfo, id string) (bool, error) { + return wclayer.LayerExists(context.Background(), layerPath(&info, id)) +} +func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error { + return wclayer.PrepareLayer(context.Background(), layerPath(&info, layerId), parentLayerPaths) +} +func ProcessBaseLayer(path string) error { + return wclayer.ProcessBaseLayer(context.Background(), path) +} +func ProcessUtilityVMImage(path string) error { + return wclayer.ProcessUtilityVMImage(context.Background(), path) +} +func UnprepareLayer(info DriverInfo, layerId string) error { + return wclayer.UnprepareLayer(context.Background(), layerPath(&info, layerId)) +} + +type DriverInfo struct { + Flavour int + HomeDir string +} + +type GUID [16]byte + +func NameToGuid(name string) (id GUID, err error) { + g, err := wclayer.NameToGuid(context.Background(), name) + return g.ToWindowsArray(), err +} + +func NewGUID(source string) *GUID { + h := sha1.Sum([]byte(source)) + var g GUID + copy(g[0:], h[0:16]) + return &g +} + +func (g *GUID) ToString() string { + return guid.FromWindowsArray(*g).String() +} + +type LayerReader = wclayer.LayerReader + +func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) { + return wclayer.NewLayerReader(context.Background(), layerPath(&info, layerID), parentLayerPaths) +} + +type LayerWriter = wclayer.LayerWriter + +func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) { + return wclayer.NewLayerWriter(context.Background(), layerPath(&info, layerID), parentLayerPaths) +} + +type WC_LAYER_DESCRIPTOR = wclayer.WC_LAYER_DESCRIPTOR diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go new file mode 100644 index 00000000000..3ab3bcd89a1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go @@ -0,0 +1,50 @@ +package osversion + +import ( + "fmt" + "sync" + + "golang.org/x/sys/windows" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +var ( + osv OSVersion + once sync.Once +) + +// Get gets the operating system version on Windows. 
+// The calling application must be manifested to get the correct version information. +func Get() OSVersion { + once.Do(func() { + var err error + osv = OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + }) + return osv +} + +// Build gets the build-number on Windows +// The calling application must be manifested to get the correct version information. +func Build() uint16 { + return Get().Build +} + +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go new file mode 100644 index 00000000000..75dce5d821d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -0,0 +1,50 @@ +package osversion + +const ( + // RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server + // 2016 (ltsc2016) and Windows 10 (Anniversary Update). + RS1 = 14393 + + // RS2 (version 1703, codename "Redstone 2") was a client-only update, and + // corresponds to Windows 10 (Creators Update). + RS2 = 15063 + + // RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server + // 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update). + RS3 = 16299 + + // RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server + // 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update). + RS4 = 17134 + + // RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server + // 2019 (ltsc2019), and Windows 10 (October 2018 Update). + RS5 = 17763 + + // V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual + // channel). + V19H1 = 18362 + + // V19H2 (version 1909) corresponds to Windows Server 1909 (semi-annual + // channel). + V19H2 = 18363 + + // V20H1 (version 2004) corresponds to Windows Server 2004 (semi-annual + // channel). + V20H1 = 19041 + + // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). + V20H2 = 19042 + + // V21H1 corresponds to Windows Server 21H1 (semi-annual channel). + V21H1 = 19043 + + // V21H2Win10 corresponds to Windows 10 (November 2021 Update). + V21H2Win10 = 19044 + + // V21H2Server corresponds to Windows Server 2022 (ltsc2022). + V21H2Server = 20348 + + // V21H2Win11 corresponds to Windows 11 (original release). + V21H2Win11 = 22000 +) diff --git a/vendor/github.com/Microsoft/hcsshim/process.go b/vendor/github.com/Microsoft/hcsshim/process.go new file mode 100644 index 00000000000..3362c683357 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/process.go @@ -0,0 +1,98 @@ +package hcsshim + +import ( + "context" + "io" + "sync" + "time" + + "github.com/Microsoft/hcsshim/internal/hcs" +) + +// ContainerError is an error encountered in HCS +type process struct { + p *hcs.Process + waitOnce sync.Once + waitCh chan struct{} + waitErr error +} + +// Pid returns the process ID of the process within the container. +func (process *process) Pid() int { + return process.p.Pid() +} + +// Kill signals the process to terminate but does not wait for it to finish terminating. 
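+// Callers that need to observe termination should follow Kill with Wait or
+// WaitTimeout (defined below); a minimal sketch: _ = p.Kill(); err = p.Wait().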
+func (process *process) Kill() error { + found, err := process.p.Kill(context.Background()) + if err != nil { + return convertProcessError(err, process) + } + if !found { + return &ProcessError{Process: process, Err: ErrElementNotFound, Operation: "hcsshim::Process::Kill"} + } + return nil +} + +// Wait waits for the process to exit. +func (process *process) Wait() error { + return convertProcessError(process.p.Wait(), process) +} + +// WaitTimeout waits for the process to exit or the duration to elapse. It returns +// false if timeout occurs. +func (process *process) WaitTimeout(timeout time.Duration) error { + process.waitOnce.Do(func() { + process.waitCh = make(chan struct{}) + go func() { + process.waitErr = process.Wait() + close(process.waitCh) + }() + }) + t := time.NewTimer(timeout) + defer t.Stop() + select { + case <-t.C: + return &ProcessError{Process: process, Err: ErrTimeout, Operation: "hcsshim::Process::Wait"} + case <-process.waitCh: + return process.waitErr + } +} + +// ExitCode returns the exit code of the process. The process must have +// already terminated. +func (process *process) ExitCode() (int, error) { + code, err := process.p.ExitCode() + if err != nil { + err = convertProcessError(err, process) + } + return code, err +} + +// ResizeConsole resizes the console of the process. +func (process *process) ResizeConsole(width, height uint16) error { + return convertProcessError(process.p.ResizeConsole(context.Background(), width, height), process) +} + +// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing +// these pipes does not close the underlying pipes; it should be possible to +// call this multiple times to get multiple interfaces. +func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) { + stdin, stdout, stderr, err := process.p.StdioLegacy() + if err != nil { + err = convertProcessError(err, process) + } + return stdin, stdout, stderr, err +} + +// CloseStdin closes the write side of the stdin pipe so that the process is +// notified on the read side that there is no more data in stdin. +func (process *process) CloseStdin() error { + return convertProcessError(process.p.CloseStdin(context.Background()), process) +} + +// Close cleans up any state associated with the process but does not kill +// or wait on it. +func (process *process) Close() error { + return convertProcessError(process.p.Close(), process) +} diff --git a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go new file mode 100644 index 00000000000..8bed8485738 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go @@ -0,0 +1,54 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package hcsshim + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
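+ // Fall through for anything uncommon: boxing a nonzero Errno into the
+ // error interface allocates, which is assumed acceptable for rare values.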
+ return e +} + +var ( + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + + procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") +) + +func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { + r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/chzyer/readline/.gitignore b/vendor/github.com/chzyer/readline/.gitignore new file mode 100644 index 00000000000..a3062beae38 --- /dev/null +++ b/vendor/github.com/chzyer/readline/.gitignore @@ -0,0 +1 @@ +.vscode/* diff --git a/vendor/github.com/chzyer/readline/.travis.yml b/vendor/github.com/chzyer/readline/.travis.yml new file mode 100644 index 00000000000..9c359554320 --- /dev/null +++ b/vendor/github.com/chzyer/readline/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - 1.x +script: + - GOOS=windows go install github.com/chzyer/readline/example/... + - GOOS=linux go install github.com/chzyer/readline/example/... + - GOOS=darwin go install github.com/chzyer/readline/example/... + - go test -race -v diff --git a/vendor/github.com/chzyer/readline/CHANGELOG.md b/vendor/github.com/chzyer/readline/CHANGELOG.md new file mode 100644 index 00000000000..14ff5be1313 --- /dev/null +++ b/vendor/github.com/chzyer/readline/CHANGELOG.md @@ -0,0 +1,58 @@ +# ChangeLog + +### 1.4 - 2016-07-25 + +* [#60][60] Support dynamic autocompletion +* Fix ANSI parser on Windows +* Fix wrong column width in complete mode on Windows +* Remove dependent package "golang.org/x/crypto/ssh/terminal" + +### 1.3 - 2016-05-09 + +* [#38][38] add SetChildren for prefix completer interface +* [#42][42] improve multiple lines compatibility +* [#43][43] remove sub-package(runes) for gopkg compatibility +* [#46][46] Auto complete with space prefixed line +* [#48][48] support suspend process (ctrl+Z) +* [#49][49] fix bug that check equals with previous command +* [#53][53] Fix bug which causes integer divide by zero panicking when input buffer is empty + +### 1.2 - 2016-03-05 + +* Add a demo for checking password strength [example/readline-pass-strength](https://github.com/chzyer/readline/blob/master/example/readline-pass-strength/readline-pass-strength.go), , written by [@sahib](https://github.com/sahib) +* [#23][23], support stdin remapping +* [#27][27], add a `UniqueEditLine` to `Config`, which will erase the editing line after user submited it, usually use in IM. +* Add a demo for multiline [example/readline-multiline](https://github.com/chzyer/readline/blob/master/example/readline-multiline/readline-multiline.go) which can submit one SQL by multiple lines. +* Supports performs even stdin/stdout is not a tty. +* Add a new simple apis for single instance, check by [here](https://github.com/chzyer/readline/blob/master/std.go). It need to save history manually if using this api. +* [#28][28], fixes the history is not working as expected. +* [#33][33], vim mode now support `c`, `d`, `x (delete character)`, `r (replace character)` + +### 1.1 - 2015-11-20 + +* [#12][12] Add support for key ``/``/`` +* Only enter raw mode as needed (calling `Readline()`), program will receive signal(e.g. Ctrl+C) if not interact with `readline`. 
+* Bugs fixed for `PrefixCompleter` +* Press `Ctrl+D` in empty line will cause `io.EOF` in error, Press `Ctrl+C` in anytime will cause `ErrInterrupt` instead of `io.EOF`, this will privodes a shell-like user experience. +* Customable Interrupt/EOF prompt in `Config` +* [#17][17] Change atomic package to use 32bit function to let it runnable on arm 32bit devices +* Provides a new password user experience(`readline.ReadPasswordEx()`). + +### 1.0 - 2015-10-14 + +* Initial public release. + +[12]: https://github.com/chzyer/readline/pull/12 +[17]: https://github.com/chzyer/readline/pull/17 +[23]: https://github.com/chzyer/readline/pull/23 +[27]: https://github.com/chzyer/readline/pull/27 +[28]: https://github.com/chzyer/readline/pull/28 +[33]: https://github.com/chzyer/readline/pull/33 +[38]: https://github.com/chzyer/readline/pull/38 +[42]: https://github.com/chzyer/readline/pull/42 +[43]: https://github.com/chzyer/readline/pull/43 +[46]: https://github.com/chzyer/readline/pull/46 +[48]: https://github.com/chzyer/readline/pull/48 +[49]: https://github.com/chzyer/readline/pull/49 +[53]: https://github.com/chzyer/readline/pull/53 +[60]: https://github.com/chzyer/readline/pull/60 diff --git a/vendor/github.com/chzyer/readline/LICENSE b/vendor/github.com/chzyer/readline/LICENSE new file mode 100644 index 00000000000..c9afab3dcd0 --- /dev/null +++ b/vendor/github.com/chzyer/readline/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Chzyer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md new file mode 100644 index 00000000000..fab974b7f34 --- /dev/null +++ b/vendor/github.com/chzyer/readline/README.md @@ -0,0 +1,114 @@ +[![Build Status](https://travis-ci.org/chzyer/readline.svg?branch=master)](https://travis-ci.org/chzyer/readline) +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.md) +[![Version](https://img.shields.io/github/tag/chzyer/readline.svg)](https://github.com/chzyer/readline/releases) +[![GoDoc](https://godoc.org/github.com/chzyer/readline?status.svg)](https://godoc.org/github.com/chzyer/readline) +[![OpenCollective](https://opencollective.com/readline/badge/backers.svg)](#backers) +[![OpenCollective](https://opencollective.com/readline/badge/sponsors.svg)](#sponsors) + +


+ +A powerful readline library in `Linux` `macOS` `Windows` `Solaris` + +## Guide + +* [Demo](example/readline-demo/readline-demo.go) +* [Shortcut](doc/shortcut.md) + +## Repos using readline + +[![cockroachdb](https://img.shields.io/github/stars/cockroachdb/cockroach.svg?label=cockroachdb/cockroach)](https://github.com/cockroachdb/cockroach) +[![robertkrimen/otto](https://img.shields.io/github/stars/robertkrimen/otto.svg?label=robertkrimen/otto)](https://github.com/robertkrimen/otto) +[![empire](https://img.shields.io/github/stars/remind101/empire.svg?label=remind101/empire)](https://github.com/remind101/empire) +[![mehrdadrad/mylg](https://img.shields.io/github/stars/mehrdadrad/mylg.svg?label=mehrdadrad/mylg)](https://github.com/mehrdadrad/mylg) +[![knq/usql](https://img.shields.io/github/stars/knq/usql.svg?label=knq/usql)](https://github.com/knq/usql) +[![youtube/doorman](https://img.shields.io/github/stars/youtube/doorman.svg?label=youtube/doorman)](https://github.com/youtube/doorman) +[![bom-d-van/harp](https://img.shields.io/github/stars/bom-d-van/harp.svg?label=bom-d-van/harp)](https://github.com/bom-d-van/harp) +[![abiosoft/ishell](https://img.shields.io/github/stars/abiosoft/ishell.svg?label=abiosoft/ishell)](https://github.com/abiosoft/ishell) +[![Netflix/hal-9001](https://img.shields.io/github/stars/Netflix/hal-9001.svg?label=Netflix/hal-9001)](https://github.com/Netflix/hal-9001) +[![docker/go-p9p](https://img.shields.io/github/stars/docker/go-p9p.svg?label=docker/go-p9p)](https://github.com/docker/go-p9p) + + +## Feedback + +If you have any questions, please submit a github issue and any pull requests is welcomed :) + +* [https://twitter.com/chzyer](https://twitter.com/chzyer) +* [http://weibo.com/2145262190](http://weibo.com/2145262190) + + +## Backers + +Love Readline? Help me keep it alive by donating funds to cover project expenses!
+[[Become a backer](https://opencollective.com/readline#backer)] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Sponsors + +Become a sponsor and get your logo here on our Github page. [[Become a sponsor](https://opencollective.com/readline#sponsor)] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/chzyer/readline/ansi_windows.go b/vendor/github.com/chzyer/readline/ansi_windows.go new file mode 100644 index 00000000000..63b908c187a --- /dev/null +++ b/vendor/github.com/chzyer/readline/ansi_windows.go @@ -0,0 +1,249 @@ +// +build windows + +package readline + +import ( + "bufio" + "io" + "strconv" + "strings" + "sync" + "unicode/utf8" + "unsafe" +) + +const ( + _ = uint16(0) + COLOR_FBLUE = 0x0001 + COLOR_FGREEN = 0x0002 + COLOR_FRED = 0x0004 + COLOR_FINTENSITY = 0x0008 + + COLOR_BBLUE = 0x0010 + COLOR_BGREEN = 0x0020 + COLOR_BRED = 0x0040 + COLOR_BINTENSITY = 0x0080 + + COMMON_LVB_UNDERSCORE = 0x8000 + COMMON_LVB_BOLD = 0x0007 +) + +var ColorTableFg = []word{ + 0, // 30: Black + COLOR_FRED, // 31: Red + COLOR_FGREEN, // 32: Green + COLOR_FRED | COLOR_FGREEN, // 33: Yellow + COLOR_FBLUE, // 34: Blue + COLOR_FRED | COLOR_FBLUE, // 35: Magenta + COLOR_FGREEN | COLOR_FBLUE, // 36: Cyan + COLOR_FRED | COLOR_FBLUE | COLOR_FGREEN, // 37: White +} + +var ColorTableBg = []word{ + 0, // 40: Black + COLOR_BRED, // 41: Red + COLOR_BGREEN, // 42: Green + COLOR_BRED | COLOR_BGREEN, // 43: Yellow + COLOR_BBLUE, // 44: Blue + COLOR_BRED | COLOR_BBLUE, // 45: Magenta + COLOR_BGREEN | COLOR_BBLUE, // 46: Cyan + COLOR_BRED | COLOR_BBLUE | COLOR_BGREEN, // 47: White +} + +type ANSIWriter struct { + target io.Writer + wg sync.WaitGroup + ctx *ANSIWriterCtx + sync.Mutex +} + +func NewANSIWriter(w io.Writer) *ANSIWriter { + a := &ANSIWriter{ + target: w, + ctx: NewANSIWriterCtx(w), + } + return a +} + +func (a *ANSIWriter) Close() error { + a.wg.Wait() + return nil +} + +type ANSIWriterCtx struct { + isEsc bool + isEscSeq bool + arg []string + target *bufio.Writer + wantFlush bool +} + +func NewANSIWriterCtx(target io.Writer) *ANSIWriterCtx { + return &ANSIWriterCtx{ + target: bufio.NewWriter(target), + } +} + +func (a *ANSIWriterCtx) Flush() { + a.target.Flush() +} + +func (a *ANSIWriterCtx) process(r rune) bool { + if a.wantFlush { + if r == 0 || r == CharEsc { + a.wantFlush = false + a.target.Flush() + } + } + if a.isEscSeq { + a.isEscSeq = a.ioloopEscSeq(a.target, r, &a.arg) + return true + } + + switch r { + case CharEsc: + a.isEsc = true + case '[': + if a.isEsc { + a.arg = nil + a.isEscSeq = true + a.isEsc = false + break + } + fallthrough + default: + a.target.WriteRune(r) + a.wantFlush = true + } + return true +} + +func (a *ANSIWriterCtx) ioloopEscSeq(w *bufio.Writer, r rune, argptr *[]string) bool { + arg := *argptr + var err error + + if r >= 'A' && r <= 'D' { + count := short(GetInt(arg, 1)) + info, err := GetConsoleScreenBufferInfo() + if err != nil { + return false + } + switch r { + case 'A': // up + info.dwCursorPosition.y -= count + case 'B': // down + info.dwCursorPosition.y += count + case 'C': // right + info.dwCursorPosition.x += count + case 'D': // left + info.dwCursorPosition.x -= count + } + SetConsoleCursorPosition(&info.dwCursorPosition) + return false + } + + switch r { + case 'J': + killLines() + case 'K': + eraseLine() + case 'm': + color := word(0) + for _, item := range arg { + var c int + c, err = strconv.Atoi(item) + if err != nil { + w.WriteString("[" + strings.Join(arg, ";") + "m") + break + } + if c >= 30 && 
c < 40 { + color ^= COLOR_FINTENSITY + color |= ColorTableFg[c-30] + } else if c >= 40 && c < 50 { + color ^= COLOR_BINTENSITY + color |= ColorTableBg[c-40] + } else if c == 4 { + color |= COMMON_LVB_UNDERSCORE | ColorTableFg[7] + } else if c == 1 { + color |= COMMON_LVB_BOLD | COLOR_FINTENSITY + } else { // unknown code treat as reset + color = ColorTableFg[7] + } + } + if err != nil { + break + } + kernel.SetConsoleTextAttribute(stdout, uintptr(color)) + case '\007': // set title + case ';': + if len(arg) == 0 || arg[len(arg)-1] != "" { + arg = append(arg, "") + *argptr = arg + } + return true + default: + if len(arg) == 0 { + arg = append(arg, "") + } + arg[len(arg)-1] += string(r) + *argptr = arg + return true + } + *argptr = nil + return false +} + +func (a *ANSIWriter) Write(b []byte) (int, error) { + a.Lock() + defer a.Unlock() + + off := 0 + for len(b) > off { + r, size := utf8.DecodeRune(b[off:]) + if size == 0 { + return off, io.ErrShortWrite + } + off += size + a.ctx.process(r) + } + a.ctx.Flush() + return off, nil +} + +func killLines() error { + sbi, err := GetConsoleScreenBufferInfo() + if err != nil { + return err + } + + size := (sbi.dwCursorPosition.y - sbi.dwSize.y) * sbi.dwSize.x + size += sbi.dwCursorPosition.x + + var written int + kernel.FillConsoleOutputAttribute(stdout, uintptr(ColorTableFg[7]), + uintptr(size), + sbi.dwCursorPosition.ptr(), + uintptr(unsafe.Pointer(&written)), + ) + return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), + uintptr(size), + sbi.dwCursorPosition.ptr(), + uintptr(unsafe.Pointer(&written)), + ) +} + +func eraseLine() error { + sbi, err := GetConsoleScreenBufferInfo() + if err != nil { + return err + } + + size := sbi.dwSize.x + sbi.dwCursorPosition.x = 0 + var written int + return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), + uintptr(size), + sbi.dwCursorPosition.ptr(), + uintptr(unsafe.Pointer(&written)), + ) +} diff --git a/vendor/github.com/chzyer/readline/complete.go b/vendor/github.com/chzyer/readline/complete.go new file mode 100644 index 00000000000..c08c994141e --- /dev/null +++ b/vendor/github.com/chzyer/readline/complete.go @@ -0,0 +1,285 @@ +package readline + +import ( + "bufio" + "bytes" + "fmt" + "io" +) + +type AutoCompleter interface { + // Readline will pass the whole line and current offset to it + // Completer need to pass all the candidates, and how long they shared the same characters in line + // Example: + // [go, git, git-shell, grep] + // Do("g", 1) => ["o", "it", "it-shell", "rep"], 1 + // Do("gi", 2) => ["t", "t-shell"], 2 + // Do("git", 3) => ["", "-shell"], 3 + Do(line []rune, pos int) (newLine [][]rune, length int) +} + +type TabCompleter struct{} + +func (t *TabCompleter) Do([]rune, int) ([][]rune, int) { + return [][]rune{[]rune("\t")}, 0 +} + +type opCompleter struct { + w io.Writer + op *Operation + width int + + inCompleteMode bool + inSelectMode bool + candidate [][]rune + candidateSource []rune + candidateOff int + candidateChoise int + candidateColNum int +} + +func newOpCompleter(w io.Writer, op *Operation, width int) *opCompleter { + return &opCompleter{ + w: w, + op: op, + width: width, + } +} + +func (o *opCompleter) doSelect() { + if len(o.candidate) == 1 { + o.op.buf.WriteRunes(o.candidate[0]) + o.ExitCompleteMode(false) + return + } + o.nextCandidate(1) + o.CompleteRefresh() +} + +func (o *opCompleter) nextCandidate(i int) { + o.candidateChoise += i + o.candidateChoise = o.candidateChoise % len(o.candidate) + if o.candidateChoise < 0 { + o.candidateChoise = 
len(o.candidate) + o.candidateChoise + } +} + +func (o *opCompleter) OnComplete() bool { + if o.width == 0 { + return false + } + if o.IsInCompleteSelectMode() { + o.doSelect() + return true + } + + buf := o.op.buf + rs := buf.Runes() + + if o.IsInCompleteMode() && o.candidateSource != nil && runes.Equal(rs, o.candidateSource) { + o.EnterCompleteSelectMode() + o.doSelect() + return true + } + + o.ExitCompleteSelectMode() + o.candidateSource = rs + newLines, offset := o.op.cfg.AutoComplete.Do(rs, buf.idx) + if len(newLines) == 0 { + o.ExitCompleteMode(false) + return true + } + + // only Aggregate candidates in non-complete mode + if !o.IsInCompleteMode() { + if len(newLines) == 1 { + buf.WriteRunes(newLines[0]) + o.ExitCompleteMode(false) + return true + } + + same, size := runes.Aggregate(newLines) + if size > 0 { + buf.WriteRunes(same) + o.ExitCompleteMode(false) + return true + } + } + + o.EnterCompleteMode(offset, newLines) + return true +} + +func (o *opCompleter) IsInCompleteSelectMode() bool { + return o.inSelectMode +} + +func (o *opCompleter) IsInCompleteMode() bool { + return o.inCompleteMode +} + +func (o *opCompleter) HandleCompleteSelect(r rune) bool { + next := true + switch r { + case CharEnter, CharCtrlJ: + next = false + o.op.buf.WriteRunes(o.op.candidate[o.op.candidateChoise]) + o.ExitCompleteMode(false) + case CharLineStart: + num := o.candidateChoise % o.candidateColNum + o.nextCandidate(-num) + case CharLineEnd: + num := o.candidateColNum - o.candidateChoise%o.candidateColNum - 1 + o.candidateChoise += num + if o.candidateChoise >= len(o.candidate) { + o.candidateChoise = len(o.candidate) - 1 + } + case CharBackspace: + o.ExitCompleteSelectMode() + next = false + case CharTab, CharForward: + o.doSelect() + case CharBell, CharInterrupt: + o.ExitCompleteMode(true) + next = false + case CharNext: + tmpChoise := o.candidateChoise + o.candidateColNum + if tmpChoise >= o.getMatrixSize() { + tmpChoise -= o.getMatrixSize() + } else if tmpChoise >= len(o.candidate) { + tmpChoise += o.candidateColNum + tmpChoise -= o.getMatrixSize() + } + o.candidateChoise = tmpChoise + case CharBackward: + o.nextCandidate(-1) + case CharPrev: + tmpChoise := o.candidateChoise - o.candidateColNum + if tmpChoise < 0 { + tmpChoise += o.getMatrixSize() + if tmpChoise >= len(o.candidate) { + tmpChoise -= o.candidateColNum + } + } + o.candidateChoise = tmpChoise + default: + next = false + o.ExitCompleteSelectMode() + } + if next { + o.CompleteRefresh() + return true + } + return false +} + +func (o *opCompleter) getMatrixSize() int { + line := len(o.candidate) / o.candidateColNum + if len(o.candidate)%o.candidateColNum != 0 { + line++ + } + return line * o.candidateColNum +} + +func (o *opCompleter) OnWidthChange(newWidth int) { + o.width = newWidth +} + +func (o *opCompleter) CompleteRefresh() { + if !o.inCompleteMode { + return + } + lineCnt := o.op.buf.CursorLineCount() + colWidth := 0 + for _, c := range o.candidate { + w := runes.WidthAll(c) + if w > colWidth { + colWidth = w + } + } + colWidth += o.candidateOff + 1 + same := o.op.buf.RuneSlice(-o.candidateOff) + + // -1 to avoid reach the end of line + width := o.width - 1 + colNum := width / colWidth + if colNum != 0 { + colWidth += (width - (colWidth * colNum)) / colNum + } + + o.candidateColNum = colNum + buf := bufio.NewWriter(o.w) + buf.Write(bytes.Repeat([]byte("\n"), lineCnt)) + + colIdx := 0 + lines := 1 + buf.WriteString("\033[J") + for idx, c := range o.candidate { + inSelect := idx == o.candidateChoise && o.IsInCompleteSelectMode() + 
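+ // The currently selected candidate is drawn black-on-white (SGR "30;47")
+ // and reset with SGR "0" after printing, so at most one entry is highlighted.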
if inSelect { + buf.WriteString("\033[30;47m") + } + buf.WriteString(string(same)) + buf.WriteString(string(c)) + buf.Write(bytes.Repeat([]byte(" "), colWidth-runes.WidthAll(c)-runes.WidthAll(same))) + + if inSelect { + buf.WriteString("\033[0m") + } + + colIdx++ + if colIdx == colNum { + buf.WriteString("\n") + lines++ + colIdx = 0 + } + } + + // move back + fmt.Fprintf(buf, "\033[%dA\r", lineCnt-1+lines) + fmt.Fprintf(buf, "\033[%dC", o.op.buf.idx+o.op.buf.PromptLen()) + buf.Flush() +} + +func (o *opCompleter) aggCandidate(candidate [][]rune) int { + offset := 0 + for i := 0; i < len(candidate[0]); i++ { + for j := 0; j < len(candidate)-1; j++ { + if i > len(candidate[j]) { + goto aggregate + } + if candidate[j][i] != candidate[j+1][i] { + goto aggregate + } + } + offset = i + } +aggregate: + return offset +} + +func (o *opCompleter) EnterCompleteSelectMode() { + o.inSelectMode = true + o.candidateChoise = -1 + o.CompleteRefresh() +} + +func (o *opCompleter) EnterCompleteMode(offset int, candidate [][]rune) { + o.inCompleteMode = true + o.candidate = candidate + o.candidateOff = offset + o.CompleteRefresh() +} + +func (o *opCompleter) ExitCompleteSelectMode() { + o.inSelectMode = false + o.candidate = nil + o.candidateChoise = -1 + o.candidateOff = -1 + o.candidateSource = nil +} + +func (o *opCompleter) ExitCompleteMode(revent bool) { + o.inCompleteMode = false + o.ExitCompleteSelectMode() +} diff --git a/vendor/github.com/chzyer/readline/complete_helper.go b/vendor/github.com/chzyer/readline/complete_helper.go new file mode 100644 index 00000000000..58d724872bf --- /dev/null +++ b/vendor/github.com/chzyer/readline/complete_helper.go @@ -0,0 +1,165 @@ +package readline + +import ( + "bytes" + "strings" +) + +// Caller type for dynamic completion +type DynamicCompleteFunc func(string) []string + +type PrefixCompleterInterface interface { + Print(prefix string, level int, buf *bytes.Buffer) + Do(line []rune, pos int) (newLine [][]rune, length int) + GetName() []rune + GetChildren() []PrefixCompleterInterface + SetChildren(children []PrefixCompleterInterface) +} + +type DynamicPrefixCompleterInterface interface { + PrefixCompleterInterface + IsDynamic() bool + GetDynamicNames(line []rune) [][]rune +} + +type PrefixCompleter struct { + Name []rune + Dynamic bool + Callback DynamicCompleteFunc + Children []PrefixCompleterInterface +} + +func (p *PrefixCompleter) Tree(prefix string) string { + buf := bytes.NewBuffer(nil) + p.Print(prefix, 0, buf) + return buf.String() +} + +func Print(p PrefixCompleterInterface, prefix string, level int, buf *bytes.Buffer) { + if strings.TrimSpace(string(p.GetName())) != "" { + buf.WriteString(prefix) + if level > 0 { + buf.WriteString("├") + buf.WriteString(strings.Repeat("─", (level*4)-2)) + buf.WriteString(" ") + } + buf.WriteString(string(p.GetName()) + "\n") + level++ + } + for _, ch := range p.GetChildren() { + ch.Print(prefix, level, buf) + } +} + +func (p *PrefixCompleter) Print(prefix string, level int, buf *bytes.Buffer) { + Print(p, prefix, level, buf) +} + +func (p *PrefixCompleter) IsDynamic() bool { + return p.Dynamic +} + +func (p *PrefixCompleter) GetName() []rune { + return p.Name +} + +func (p *PrefixCompleter) GetDynamicNames(line []rune) [][]rune { + var names = [][]rune{} + for _, name := range p.Callback(string(line)) { + names = append(names, []rune(name+" ")) + } + return names +} + +func (p *PrefixCompleter) GetChildren() []PrefixCompleterInterface { + return p.Children +} + +func (p *PrefixCompleter) SetChildren(children 
[]PrefixCompleterInterface) { + p.Children = children +} + +func NewPrefixCompleter(pc ...PrefixCompleterInterface) *PrefixCompleter { + return PcItem("", pc...) +} + +func PcItem(name string, pc ...PrefixCompleterInterface) *PrefixCompleter { + name += " " + return &PrefixCompleter{ + Name: []rune(name), + Dynamic: false, + Children: pc, + } +} + +func PcItemDynamic(callback DynamicCompleteFunc, pc ...PrefixCompleterInterface) *PrefixCompleter { + return &PrefixCompleter{ + Callback: callback, + Dynamic: true, + Children: pc, + } +} + +func (p *PrefixCompleter) Do(line []rune, pos int) (newLine [][]rune, offset int) { + return doInternal(p, line, pos, line) +} + +func Do(p PrefixCompleterInterface, line []rune, pos int) (newLine [][]rune, offset int) { + return doInternal(p, line, pos, line) +} + +func doInternal(p PrefixCompleterInterface, line []rune, pos int, origLine []rune) (newLine [][]rune, offset int) { + line = runes.TrimSpaceLeft(line[:pos]) + goNext := false + var lineCompleter PrefixCompleterInterface + for _, child := range p.GetChildren() { + childNames := make([][]rune, 1) + + childDynamic, ok := child.(DynamicPrefixCompleterInterface) + if ok && childDynamic.IsDynamic() { + childNames = childDynamic.GetDynamicNames(origLine) + } else { + childNames[0] = child.GetName() + } + + for _, childName := range childNames { + if len(line) >= len(childName) { + if runes.HasPrefix(line, childName) { + if len(line) == len(childName) { + newLine = append(newLine, []rune{' '}) + } else { + newLine = append(newLine, childName) + } + offset = len(childName) + lineCompleter = child + goNext = true + } + } else { + if runes.HasPrefix(childName, line) { + newLine = append(newLine, childName[len(line):]) + offset = len(line) + lineCompleter = child + } + } + } + } + + if len(newLine) != 1 { + return + } + + tmpLine := make([]rune, 0, len(line)) + for i := offset; i < len(line); i++ { + if line[i] == ' ' { + continue + } + + tmpLine = append(tmpLine, line[i:]...) 
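+			// found the start of the remaining input: recurse into the matched child completer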
+ return doInternal(lineCompleter, tmpLine, len(tmpLine), origLine) + } + + if goNext { + return doInternal(lineCompleter, nil, 0, origLine) + } + return +} diff --git a/vendor/github.com/chzyer/readline/complete_segment.go b/vendor/github.com/chzyer/readline/complete_segment.go new file mode 100644 index 00000000000..5ceadd80f97 --- /dev/null +++ b/vendor/github.com/chzyer/readline/complete_segment.go @@ -0,0 +1,82 @@ +package readline + +type SegmentCompleter interface { + // a + // |- a1 + // |--- a11 + // |- a2 + // b + // input: + // DoTree([], 0) [a, b] + // DoTree([a], 1) [a] + // DoTree([a, ], 0) [a1, a2] + // DoTree([a, a], 1) [a1, a2] + // DoTree([a, a1], 2) [a1] + // DoTree([a, a1, ], 0) [a11] + // DoTree([a, a1, a], 1) [a11] + DoSegment([][]rune, int) [][]rune +} + +type dumpSegmentCompleter struct { + f func([][]rune, int) [][]rune +} + +func (d *dumpSegmentCompleter) DoSegment(segment [][]rune, n int) [][]rune { + return d.f(segment, n) +} + +func SegmentFunc(f func([][]rune, int) [][]rune) AutoCompleter { + return &SegmentComplete{&dumpSegmentCompleter{f}} +} + +func SegmentAutoComplete(completer SegmentCompleter) *SegmentComplete { + return &SegmentComplete{ + SegmentCompleter: completer, + } +} + +type SegmentComplete struct { + SegmentCompleter +} + +func RetSegment(segments [][]rune, cands [][]rune, idx int) ([][]rune, int) { + ret := make([][]rune, 0, len(cands)) + lastSegment := segments[len(segments)-1] + for _, cand := range cands { + if !runes.HasPrefix(cand, lastSegment) { + continue + } + ret = append(ret, cand[len(lastSegment):]) + } + return ret, idx +} + +func SplitSegment(line []rune, pos int) ([][]rune, int) { + segs := [][]rune{} + lastIdx := -1 + line = line[:pos] + pos = 0 + for idx, l := range line { + if l == ' ' { + pos = 0 + segs = append(segs, line[lastIdx+1:idx]) + lastIdx = idx + } else { + pos++ + } + } + segs = append(segs, line[lastIdx+1:]) + return segs, pos +} + +func (c *SegmentComplete) Do(line []rune, pos int) (newLine [][]rune, offset int) { + + segment, idx := SplitSegment(line, pos) + + cands := c.DoSegment(segment, idx) + newLine, offset = RetSegment(segment, cands, idx) + for idx := range newLine { + newLine[idx] = append(newLine[idx], ' ') + } + return newLine, offset +} diff --git a/vendor/github.com/chzyer/readline/history.go b/vendor/github.com/chzyer/readline/history.go new file mode 100644 index 00000000000..6b17c464baf --- /dev/null +++ b/vendor/github.com/chzyer/readline/history.go @@ -0,0 +1,330 @@ +package readline + +import ( + "bufio" + "container/list" + "fmt" + "os" + "strings" + "sync" +) + +type hisItem struct { + Source []rune + Version int64 + Tmp []rune +} + +func (h *hisItem) Clean() { + h.Source = nil + h.Tmp = nil +} + +type opHistory struct { + cfg *Config + history *list.List + historyVer int64 + current *list.Element + fd *os.File + fdLock sync.Mutex + enable bool +} + +func newOpHistory(cfg *Config) (o *opHistory) { + o = &opHistory{ + cfg: cfg, + history: list.New(), + enable: true, + } + return o +} + +func (o *opHistory) Reset() { + o.history = list.New() + o.current = nil +} + +func (o *opHistory) IsHistoryClosed() bool { + o.fdLock.Lock() + defer o.fdLock.Unlock() + return o.fd.Fd() == ^(uintptr(0)) +} + +func (o *opHistory) Init() { + if o.IsHistoryClosed() { + o.initHistory() + } +} + +func (o *opHistory) initHistory() { + if o.cfg.HistoryFile != "" { + o.historyUpdatePath(o.cfg.HistoryFile) + } +} + +// only called by newOpHistory +func (o *opHistory) historyUpdatePath(path string) { + o.fdLock.Lock() + 
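	// the file handle and the in-memory history list are rebuilt together under fdLock
+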
defer o.fdLock.Unlock() + f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return + } + o.fd = f + r := bufio.NewReader(o.fd) + total := 0 + for ; ; total++ { + line, err := r.ReadString('\n') + if err != nil { + break + } + // ignore the empty line + line = strings.TrimSpace(line) + if len(line) == 0 { + continue + } + o.Push([]rune(line)) + o.Compact() + } + if total > o.cfg.HistoryLimit { + o.rewriteLocked() + } + o.historyVer++ + o.Push(nil) + return +} + +func (o *opHistory) Compact() { + for o.history.Len() > o.cfg.HistoryLimit && o.history.Len() > 0 { + o.history.Remove(o.history.Front()) + } +} + +func (o *opHistory) Rewrite() { + o.fdLock.Lock() + defer o.fdLock.Unlock() + o.rewriteLocked() +} + +func (o *opHistory) rewriteLocked() { + if o.cfg.HistoryFile == "" { + return + } + + tmpFile := o.cfg.HistoryFile + ".tmp" + fd, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_APPEND, 0666) + if err != nil { + return + } + + buf := bufio.NewWriter(fd) + for elem := o.history.Front(); elem != nil; elem = elem.Next() { + buf.WriteString(string(elem.Value.(*hisItem).Source) + "\n") + } + buf.Flush() + + // replace history file + if err = os.Rename(tmpFile, o.cfg.HistoryFile); err != nil { + fd.Close() + return + } + + if o.fd != nil { + o.fd.Close() + } + // fd is write only, just satisfy what we need. + o.fd = fd +} + +func (o *opHistory) Close() { + o.fdLock.Lock() + defer o.fdLock.Unlock() + if o.fd != nil { + o.fd.Close() + } +} + +func (o *opHistory) FindBck(isNewSearch bool, rs []rune, start int) (int, *list.Element) { + for elem := o.current; elem != nil; elem = elem.Prev() { + item := o.showItem(elem.Value) + if isNewSearch { + start += len(rs) + } + if elem == o.current { + if len(item) >= start { + item = item[:start] + } + } + idx := runes.IndexAllBckEx(item, rs, o.cfg.HistorySearchFold) + if idx < 0 { + continue + } + return idx, elem + } + return -1, nil +} + +func (o *opHistory) FindFwd(isNewSearch bool, rs []rune, start int) (int, *list.Element) { + for elem := o.current; elem != nil; elem = elem.Next() { + item := o.showItem(elem.Value) + if isNewSearch { + start -= len(rs) + if start < 0 { + start = 0 + } + } + if elem == o.current { + if len(item)-1 >= start { + item = item[start:] + } else { + continue + } + } + idx := runes.IndexAllEx(item, rs, o.cfg.HistorySearchFold) + if idx < 0 { + continue + } + if elem == o.current { + idx += start + } + return idx, elem + } + return -1, nil +} + +func (o *opHistory) showItem(obj interface{}) []rune { + item := obj.(*hisItem) + if item.Version == o.historyVer { + return item.Tmp + } + return item.Source +} + +func (o *opHistory) Prev() []rune { + if o.current == nil { + return nil + } + current := o.current.Prev() + if current == nil { + return nil + } + o.current = current + return runes.Copy(o.showItem(current.Value)) +} + +func (o *opHistory) Next() ([]rune, bool) { + if o.current == nil { + return nil, false + } + current := o.current.Next() + if current == nil { + return nil, false + } + + o.current = current + return runes.Copy(o.showItem(current.Value)), true +} + +// Disable the current history +func (o *opHistory) Disable() { + o.enable = false +} + +// Enable the current history +func (o *opHistory) Enable() { + o.enable = true +} + +func (o *opHistory) debug() { + Debug("-------") + for item := o.history.Front(); item != nil; item = item.Next() { + Debug(fmt.Sprintf("%+v", item.Value)) + } +} + +// save history +func (o *opHistory) New(current []rune) (err 
error) { + + // history deactivated + if !o.enable { + return nil + } + + current = runes.Copy(current) + + // if just use last command without modify + // just clean lastest history + if back := o.history.Back(); back != nil { + prev := back.Prev() + if prev != nil { + if runes.Equal(current, prev.Value.(*hisItem).Source) { + o.current = o.history.Back() + o.current.Value.(*hisItem).Clean() + o.historyVer++ + return nil + } + } + } + + if len(current) == 0 { + o.current = o.history.Back() + if o.current != nil { + o.current.Value.(*hisItem).Clean() + o.historyVer++ + return nil + } + } + + if o.current != o.history.Back() { + // move history item to current command + currentItem := o.current.Value.(*hisItem) + // set current to last item + o.current = o.history.Back() + + current = runes.Copy(currentItem.Tmp) + } + + // err only can be a IO error, just report + err = o.Update(current, true) + + // push a new one to commit current command + o.historyVer++ + o.Push(nil) + return +} + +func (o *opHistory) Revert() { + o.historyVer++ + o.current = o.history.Back() +} + +func (o *opHistory) Update(s []rune, commit bool) (err error) { + o.fdLock.Lock() + defer o.fdLock.Unlock() + s = runes.Copy(s) + if o.current == nil { + o.Push(s) + o.Compact() + return + } + r := o.current.Value.(*hisItem) + r.Version = o.historyVer + if commit { + r.Source = s + if o.fd != nil { + // just report the error + _, err = o.fd.Write([]byte(string(r.Source) + "\n")) + } + } else { + r.Tmp = append(r.Tmp[:0], s...) + } + o.current.Value = r + o.Compact() + return +} + +func (o *opHistory) Push(s []rune) { + s = runes.Copy(s) + elem := o.history.PushBack(&hisItem{Source: s}) + o.current = elem +} diff --git a/vendor/github.com/chzyer/readline/operation.go b/vendor/github.com/chzyer/readline/operation.go new file mode 100644 index 00000000000..4c31624f806 --- /dev/null +++ b/vendor/github.com/chzyer/readline/operation.go @@ -0,0 +1,531 @@ +package readline + +import ( + "errors" + "io" + "sync" +) + +var ( + ErrInterrupt = errors.New("Interrupt") +) + +type InterruptError struct { + Line []rune +} + +func (*InterruptError) Error() string { + return "Interrupted" +} + +type Operation struct { + m sync.Mutex + cfg *Config + t *Terminal + buf *RuneBuffer + outchan chan []rune + errchan chan error + w io.Writer + + history *opHistory + *opSearch + *opCompleter + *opPassword + *opVim +} + +func (o *Operation) SetBuffer(what string) { + o.buf.Set([]rune(what)) +} + +type wrapWriter struct { + r *Operation + t *Terminal + target io.Writer +} + +func (w *wrapWriter) Write(b []byte) (int, error) { + if !w.t.IsReading() { + return w.target.Write(b) + } + + var ( + n int + err error + ) + w.r.buf.Refresh(func() { + n, err = w.target.Write(b) + }) + + if w.r.IsSearchMode() { + w.r.SearchRefresh(-1) + } + if w.r.IsInCompleteMode() { + w.r.CompleteRefresh() + } + return n, err +} + +func NewOperation(t *Terminal, cfg *Config) *Operation { + width := cfg.FuncGetWidth() + op := &Operation{ + t: t, + buf: NewRuneBuffer(t, cfg.Prompt, cfg, width), + outchan: make(chan []rune), + errchan: make(chan error, 1), + } + op.w = op.buf.w + op.SetConfig(cfg) + op.opVim = newVimMode(op) + op.opCompleter = newOpCompleter(op.buf.w, op, width) + op.opPassword = newOpPassword(op) + op.cfg.FuncOnWidthChanged(func() { + newWidth := cfg.FuncGetWidth() + op.opCompleter.OnWidthChange(newWidth) + op.opSearch.OnWidthChange(newWidth) + op.buf.OnWidthChange(newWidth) + }) + go op.ioloop() + return op +} + +func (o *Operation) SetPrompt(s string) { + 
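	// prompt rendering is owned by the RuneBuffer, so just delegate
+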
o.buf.SetPrompt(s) +} + +func (o *Operation) SetMaskRune(r rune) { + o.buf.SetMask(r) +} + +func (o *Operation) GetConfig() *Config { + o.m.Lock() + cfg := *o.cfg + o.m.Unlock() + return &cfg +} + +func (o *Operation) ioloop() { + for { + keepInSearchMode := false + keepInCompleteMode := false + r := o.t.ReadRune() + if o.GetConfig().FuncFilterInputRune != nil { + var process bool + r, process = o.GetConfig().FuncFilterInputRune(r) + if !process { + o.buf.Refresh(nil) // to refresh the line + continue // ignore this rune + } + } + + if r == 0 { // io.EOF + if o.buf.Len() == 0 { + o.buf.Clean() + select { + case o.errchan <- io.EOF: + } + break + } else { + // if stdin got io.EOF and there is something left in buffer, + // let's flush them by sending CharEnter. + // And we will got io.EOF int next loop. + r = CharEnter + } + } + isUpdateHistory := true + + if o.IsInCompleteSelectMode() { + keepInCompleteMode = o.HandleCompleteSelect(r) + if keepInCompleteMode { + continue + } + + o.buf.Refresh(nil) + switch r { + case CharEnter, CharCtrlJ: + o.history.Update(o.buf.Runes(), false) + fallthrough + case CharInterrupt: + o.t.KickRead() + fallthrough + case CharBell: + continue + } + } + + if o.IsEnableVimMode() { + r = o.HandleVim(r, o.t.ReadRune) + if r == 0 { + continue + } + } + + switch r { + case CharBell: + if o.IsSearchMode() { + o.ExitSearchMode(true) + o.buf.Refresh(nil) + } + if o.IsInCompleteMode() { + o.ExitCompleteMode(true) + o.buf.Refresh(nil) + } + case CharTab: + if o.GetConfig().AutoComplete == nil { + o.t.Bell() + break + } + if o.OnComplete() { + keepInCompleteMode = true + } else { + o.t.Bell() + break + } + + case CharBckSearch: + if !o.SearchMode(S_DIR_BCK) { + o.t.Bell() + break + } + keepInSearchMode = true + case CharCtrlU: + o.buf.KillFront() + case CharFwdSearch: + if !o.SearchMode(S_DIR_FWD) { + o.t.Bell() + break + } + keepInSearchMode = true + case CharKill: + o.buf.Kill() + keepInCompleteMode = true + case MetaForward: + o.buf.MoveToNextWord() + case CharTranspose: + o.buf.Transpose() + case MetaBackward: + o.buf.MoveToPrevWord() + case MetaDelete: + o.buf.DeleteWord() + case CharLineStart: + o.buf.MoveToLineStart() + case CharLineEnd: + o.buf.MoveToLineEnd() + case CharBackspace, CharCtrlH: + if o.IsSearchMode() { + o.SearchBackspace() + keepInSearchMode = true + break + } + + if o.buf.Len() == 0 { + o.t.Bell() + break + } + o.buf.Backspace() + if o.IsInCompleteMode() { + o.OnComplete() + } + case CharCtrlZ: + o.buf.Clean() + o.t.SleepToResume() + o.Refresh() + case CharCtrlL: + ClearScreen(o.w) + o.Refresh() + case MetaBackspace, CharCtrlW: + o.buf.BackEscapeWord() + case CharCtrlY: + o.buf.Yank() + case CharEnter, CharCtrlJ: + if o.IsSearchMode() { + o.ExitSearchMode(false) + } + o.buf.MoveToLineEnd() + var data []rune + if !o.GetConfig().UniqueEditLine { + o.buf.WriteRune('\n') + data = o.buf.Reset() + data = data[:len(data)-1] // trim \n + } else { + o.buf.Clean() + data = o.buf.Reset() + } + o.outchan <- data + if !o.GetConfig().DisableAutoSaveHistory { + // ignore IO error + _ = o.history.New(data) + } else { + isUpdateHistory = false + } + case CharBackward: + o.buf.MoveBackward() + case CharForward: + o.buf.MoveForward() + case CharPrev: + buf := o.history.Prev() + if buf != nil { + o.buf.Set(buf) + } else { + o.t.Bell() + } + case CharNext: + buf, ok := o.history.Next() + if ok { + o.buf.Set(buf) + } else { + o.t.Bell() + } + case CharDelete: + if o.buf.Len() > 0 || !o.IsNormalMode() { + o.t.KickRead() + if !o.buf.Delete() { + o.t.Bell() + } + break + } 
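+			// otherwise the buffer is empty and we are in normal mode, so fall through: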
+ + // treat as EOF + if !o.GetConfig().UniqueEditLine { + o.buf.WriteString(o.GetConfig().EOFPrompt + "\n") + } + o.buf.Reset() + isUpdateHistory = false + o.history.Revert() + o.errchan <- io.EOF + if o.GetConfig().UniqueEditLine { + o.buf.Clean() + } + case CharInterrupt: + if o.IsSearchMode() { + o.t.KickRead() + o.ExitSearchMode(true) + break + } + if o.IsInCompleteMode() { + o.t.KickRead() + o.ExitCompleteMode(true) + o.buf.Refresh(nil) + break + } + o.buf.MoveToLineEnd() + o.buf.Refresh(nil) + hint := o.GetConfig().InterruptPrompt + "\n" + if !o.GetConfig().UniqueEditLine { + o.buf.WriteString(hint) + } + remain := o.buf.Reset() + if !o.GetConfig().UniqueEditLine { + remain = remain[:len(remain)-len([]rune(hint))] + } + isUpdateHistory = false + o.history.Revert() + o.errchan <- &InterruptError{remain} + default: + if o.IsSearchMode() { + o.SearchChar(r) + keepInSearchMode = true + break + } + o.buf.WriteRune(r) + if o.IsInCompleteMode() { + o.OnComplete() + keepInCompleteMode = true + } + } + + listener := o.GetConfig().Listener + if listener != nil { + newLine, newPos, ok := listener.OnChange(o.buf.Runes(), o.buf.Pos(), r) + if ok { + o.buf.SetWithIdx(newPos, newLine) + } + } + + o.m.Lock() + if !keepInSearchMode && o.IsSearchMode() { + o.ExitSearchMode(false) + o.buf.Refresh(nil) + } else if o.IsInCompleteMode() { + if !keepInCompleteMode { + o.ExitCompleteMode(false) + o.Refresh() + } else { + o.buf.Refresh(nil) + o.CompleteRefresh() + } + } + if isUpdateHistory && !o.IsSearchMode() { + // it will cause null history + o.history.Update(o.buf.Runes(), false) + } + o.m.Unlock() + } +} + +func (o *Operation) Stderr() io.Writer { + return &wrapWriter{target: o.GetConfig().Stderr, r: o, t: o.t} +} + +func (o *Operation) Stdout() io.Writer { + return &wrapWriter{target: o.GetConfig().Stdout, r: o, t: o.t} +} + +func (o *Operation) String() (string, error) { + r, err := o.Runes() + return string(r), err +} + +func (o *Operation) Runes() ([]rune, error) { + o.t.EnterRawMode() + defer o.t.ExitRawMode() + + listener := o.GetConfig().Listener + if listener != nil { + listener.OnChange(nil, 0, 0) + } + + o.buf.Refresh(nil) // print prompt + o.t.KickRead() + select { + case r := <-o.outchan: + return r, nil + case err := <-o.errchan: + if e, ok := err.(*InterruptError); ok { + return e.Line, ErrInterrupt + } + return nil, err + } +} + +func (o *Operation) PasswordEx(prompt string, l Listener) ([]byte, error) { + cfg := o.GenPasswordConfig() + cfg.Prompt = prompt + cfg.Listener = l + return o.PasswordWithConfig(cfg) +} + +func (o *Operation) GenPasswordConfig() *Config { + return o.opPassword.PasswordConfig() +} + +func (o *Operation) PasswordWithConfig(cfg *Config) ([]byte, error) { + if err := o.opPassword.EnterPasswordMode(cfg); err != nil { + return nil, err + } + defer o.opPassword.ExitPasswordMode() + return o.Slice() +} + +func (o *Operation) Password(prompt string) ([]byte, error) { + return o.PasswordEx(prompt, nil) +} + +func (o *Operation) SetTitle(t string) { + o.w.Write([]byte("\033[2;" + t + "\007")) +} + +func (o *Operation) Slice() ([]byte, error) { + r, err := o.Runes() + if err != nil { + return nil, err + } + return []byte(string(r)), nil +} + +func (o *Operation) Close() { + o.history.Close() +} + +func (o *Operation) SetHistoryPath(path string) { + if o.history != nil { + o.history.Close() + } + o.cfg.HistoryFile = path + o.history = newOpHistory(o.cfg) +} + +func (o *Operation) IsNormalMode() bool { + return !o.IsInCompleteMode() && !o.IsSearchMode() +} + +func (op 
*Operation) SetConfig(cfg *Config) (*Config, error) { + op.m.Lock() + defer op.m.Unlock() + if op.cfg == cfg { + return op.cfg, nil + } + if err := cfg.Init(); err != nil { + return op.cfg, err + } + old := op.cfg + op.cfg = cfg + op.SetPrompt(cfg.Prompt) + op.SetMaskRune(cfg.MaskRune) + op.buf.SetConfig(cfg) + width := op.cfg.FuncGetWidth() + + if cfg.opHistory == nil { + op.SetHistoryPath(cfg.HistoryFile) + cfg.opHistory = op.history + cfg.opSearch = newOpSearch(op.buf.w, op.buf, op.history, cfg, width) + } + op.history = cfg.opHistory + + // SetHistoryPath will close opHistory which already exists + // so if we use it next time, we need to reopen it by `InitHistory()` + op.history.Init() + + if op.cfg.AutoComplete != nil { + op.opCompleter = newOpCompleter(op.buf.w, op, width) + } + + op.opSearch = cfg.opSearch + return old, nil +} + +func (o *Operation) ResetHistory() { + o.history.Reset() +} + +// if err is not nil, it just mean it fail to write to file +// other things goes fine. +func (o *Operation) SaveHistory(content string) error { + return o.history.New([]rune(content)) +} + +func (o *Operation) Refresh() { + if o.t.IsReading() { + o.buf.Refresh(nil) + } +} + +func (o *Operation) Clean() { + o.buf.Clean() +} + +func FuncListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) Listener { + return &DumpListener{f: f} +} + +type DumpListener struct { + f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) +} + +func (d *DumpListener) OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) { + return d.f(line, pos, key) +} + +type Listener interface { + OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) +} + +type Painter interface { + Paint(line []rune, pos int) []rune +} + +type defaultPainter struct{} + +func (p *defaultPainter) Paint(line []rune, _ int) []rune { + return line +} diff --git a/vendor/github.com/chzyer/readline/password.go b/vendor/github.com/chzyer/readline/password.go new file mode 100644 index 00000000000..414288c2a50 --- /dev/null +++ b/vendor/github.com/chzyer/readline/password.go @@ -0,0 +1,33 @@ +package readline + +type opPassword struct { + o *Operation + backupCfg *Config +} + +func newOpPassword(o *Operation) *opPassword { + return &opPassword{o: o} +} + +func (o *opPassword) ExitPasswordMode() { + o.o.SetConfig(o.backupCfg) + o.backupCfg = nil +} + +func (o *opPassword) EnterPasswordMode(cfg *Config) (err error) { + o.backupCfg, err = o.o.SetConfig(cfg) + return +} + +func (o *opPassword) PasswordConfig() *Config { + return &Config{ + EnableMask: true, + InterruptPrompt: "\n", + EOFPrompt: "\n", + HistoryLimit: -1, + Painter: &defaultPainter{}, + + Stdout: o.o.cfg.Stdout, + Stderr: o.o.cfg.Stderr, + } +} diff --git a/vendor/github.com/chzyer/readline/rawreader_windows.go b/vendor/github.com/chzyer/readline/rawreader_windows.go new file mode 100644 index 00000000000..073ef150a59 --- /dev/null +++ b/vendor/github.com/chzyer/readline/rawreader_windows.go @@ -0,0 +1,125 @@ +// +build windows + +package readline + +import "unsafe" + +const ( + VK_CANCEL = 0x03 + VK_BACK = 0x08 + VK_TAB = 0x09 + VK_RETURN = 0x0D + VK_SHIFT = 0x10 + VK_CONTROL = 0x11 + VK_MENU = 0x12 + VK_ESCAPE = 0x1B + VK_LEFT = 0x25 + VK_UP = 0x26 + VK_RIGHT = 0x27 + VK_DOWN = 0x28 + VK_DELETE = 0x2E + VK_LSHIFT = 0xA0 + VK_RSHIFT = 0xA1 + VK_LCONTROL = 0xA2 + VK_RCONTROL = 0xA3 +) + +// RawReader translate input record to ANSI escape sequence. 
+// It provides the same behavior as a unix terminal.
+type RawReader struct {
+	ctrlKey bool
+	altKey  bool
+}
+
+func NewRawReader() *RawReader {
+	r := new(RawReader)
+	return r
+}
+
+// only process one action per read
+func (r *RawReader) Read(buf []byte) (int, error) {
+	ir := new(_INPUT_RECORD)
+	var read int
+	var err error
+next:
+	err = kernel.ReadConsoleInputW(stdin,
+		uintptr(unsafe.Pointer(ir)),
+		1,
+		uintptr(unsafe.Pointer(&read)),
+	)
+	if err != nil {
+		return 0, err
+	}
+	if ir.EventType != EVENT_KEY {
+		goto next
+	}
+	ker := (*_KEY_EVENT_RECORD)(unsafe.Pointer(&ir.Event[0]))
+	if ker.bKeyDown == 0 { // keyup
+		if r.ctrlKey || r.altKey {
+			switch ker.wVirtualKeyCode {
+			case VK_RCONTROL, VK_LCONTROL:
+				r.ctrlKey = false
+			case VK_MENU: //alt
+				r.altKey = false
+			}
+		}
+		goto next
+	}
+
+	if ker.unicodeChar == 0 {
+		var target rune
+		switch ker.wVirtualKeyCode {
+		case VK_RCONTROL, VK_LCONTROL:
+			r.ctrlKey = true
+		case VK_MENU: //alt
+			r.altKey = true
+		case VK_LEFT:
+			target = CharBackward
+		case VK_RIGHT:
+			target = CharForward
+		case VK_UP:
+			target = CharPrev
+		case VK_DOWN:
+			target = CharNext
+		}
+		if target != 0 {
+			return r.write(buf, target)
+		}
+		goto next
+	}
+	char := rune(ker.unicodeChar)
+	if r.ctrlKey {
+		switch char {
+		case 'A':
+			char = CharLineStart
+		case 'E':
+			char = CharLineEnd
+		case 'R':
+			char = CharBckSearch
+		case 'S':
+			char = CharFwdSearch
+		}
+	} else if r.altKey {
+		switch char {
+		case VK_BACK:
+			char = CharBackspace
+		}
+		return r.writeEsc(buf, char)
+	}
+	return r.write(buf, char)
+}
+
+func (r *RawReader) writeEsc(b []byte, char rune) (int, error) {
+	b[0] = '\033'
+	n := copy(b[1:], []byte(string(char)))
+	return n + 1, nil
+}
+
+func (r *RawReader) write(b []byte, char rune) (int, error) {
+	n := copy(b, []byte(string(char)))
+	return n, nil
+}
+
+func (r *RawReader) Close() error {
+	return nil
+}
diff --git a/vendor/github.com/chzyer/readline/readline.go b/vendor/github.com/chzyer/readline/readline.go
new file mode 100644
index 00000000000..0e7aca06d5a
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/readline.go
@@ -0,0 +1,326 @@
+// Readline is a pure Go implementation of a GNU-Readline-like library.
+//
+// example:
+//	rl, err := readline.New("> ")
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer rl.Close()
+//
+//	for {
+//		line, err := rl.Readline()
+//		if err != nil { // io.EOF
+//			break
+//		}
+//		println(line)
+//	}
+//
+package readline
+
+import "io"
+
+type Instance struct {
+	Config    *Config
+	Terminal  *Terminal
+	Operation *Operation
+}
+
+type Config struct {
+	// Prompt supports ANSI escape sequences, so we can color some characters even on windows
+	Prompt string
+
+	// readline will persist history to the file specified by HistoryFile
+	HistoryFile string
+	// specify the max number of history entries; it's 500 by default, set it to -1 to disable history
+	HistoryLimit           int
+	DisableAutoSaveHistory bool
+	// enable case-insensitive history searching
+	HistorySearchFold bool
+
+	// AutoComplete will be called once the user presses TAB
+	AutoComplete AutoCompleter
+
+	// Any key press will be passed to Listener
+	// NOTE: Listener will be triggered by (nil, 0, 0) immediately
+	Listener Listener
+
+	Painter Painter
+
+	// If VimMode is true, readline will be in vim.insert mode by default
+	VimMode bool
+
+	InterruptPrompt string
+	EOFPrompt       string
+
+	FuncGetWidth func() int
+
+	Stdin       io.ReadCloser
+	StdinWriter io.Writer
+	Stdout      io.Writer
+	Stderr      io.Writer
+
+	EnableMask bool
+	MaskRune   rune
+
+	// erase the editing line after the user submits it;
+	// usually used in IM.
+	UniqueEditLine bool
+
+	// filter input runes (may be used to disable CtrlZ or for translating some keys to different actions)
+	// -> output = new (translated) rune and true/false whether to continue processing this one
+	FuncFilterInputRune func(rune) (rune, bool)
+
+	// force interactive mode even if stdout is not a tty
+	FuncIsTerminal      func() bool
+	FuncMakeRaw         func() error
+	FuncExitRaw         func() error
+	FuncOnWidthChanged  func(func())
+	ForceUseInteractive bool
+
+	// private fields
+	inited    bool
+	opHistory *opHistory
+	opSearch  *opSearch
+}
+
+func (c *Config) useInteractive() bool {
+	if c.ForceUseInteractive {
+		return true
+	}
+	return c.FuncIsTerminal()
+}
+
+func (c *Config) Init() error {
+	if c.inited {
+		return nil
+	}
+	c.inited = true
+	if c.Stdin == nil {
+		c.Stdin = NewCancelableStdin(Stdin)
+	}
+
+	c.Stdin, c.StdinWriter = NewFillableStdin(c.Stdin)
+
+	if c.Stdout == nil {
+		c.Stdout = Stdout
+	}
+	if c.Stderr == nil {
+		c.Stderr = Stderr
+	}
+	if c.HistoryLimit == 0 {
+		c.HistoryLimit = 500
+	}
+
+	if c.InterruptPrompt == "" {
+		c.InterruptPrompt = "^C"
+	} else if c.InterruptPrompt == "\n" {
+		c.InterruptPrompt = ""
+	}
+	if c.EOFPrompt == "" {
+		c.EOFPrompt = "^D"
+	} else if c.EOFPrompt == "\n" {
+		c.EOFPrompt = ""
+	}
+
+	if c.AutoComplete == nil {
+		c.AutoComplete = &TabCompleter{}
+	}
+	if c.FuncGetWidth == nil {
+		c.FuncGetWidth = GetScreenWidth
+	}
+	if c.FuncIsTerminal == nil {
+		c.FuncIsTerminal = DefaultIsTerminal
+	}
+	rm := new(RawMode)
+	if c.FuncMakeRaw == nil {
+		c.FuncMakeRaw = rm.Enter
+	}
+	if c.FuncExitRaw == nil {
+		c.FuncExitRaw = rm.Exit
+	}
+	if c.FuncOnWidthChanged == nil {
+		c.FuncOnWidthChanged = DefaultOnWidthChanged
+	}
+
+	return nil
+}
+
+func (c Config) Clone() *Config {
+	c.opHistory = nil
+	c.opSearch = nil
+	return &c
+}
+
+func (c *Config) SetListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) {
+	c.Listener = FuncListener(f)
+}
+
+func (c *Config) SetPainter(p Painter) {
+	c.Painter = p
+}
+
+func NewEx(cfg *Config) (*Instance, error) {
+	t, err := NewTerminal(cfg)
+	if err != nil {
+		return nil, err
+	}
+	rl := t.Readline()
+	if cfg.Painter ==
nil { + cfg.Painter = &defaultPainter{} + } + return &Instance{ + Config: cfg, + Terminal: t, + Operation: rl, + }, nil +} + +func New(prompt string) (*Instance, error) { + return NewEx(&Config{Prompt: prompt}) +} + +func (i *Instance) ResetHistory() { + i.Operation.ResetHistory() +} + +func (i *Instance) SetPrompt(s string) { + i.Operation.SetPrompt(s) +} + +func (i *Instance) SetMaskRune(r rune) { + i.Operation.SetMaskRune(r) +} + +// change history persistence in runtime +func (i *Instance) SetHistoryPath(p string) { + i.Operation.SetHistoryPath(p) +} + +// readline will refresh automatic when write through Stdout() +func (i *Instance) Stdout() io.Writer { + return i.Operation.Stdout() +} + +// readline will refresh automatic when write through Stdout() +func (i *Instance) Stderr() io.Writer { + return i.Operation.Stderr() +} + +// switch VimMode in runtime +func (i *Instance) SetVimMode(on bool) { + i.Operation.SetVimMode(on) +} + +func (i *Instance) IsVimMode() bool { + return i.Operation.IsEnableVimMode() +} + +func (i *Instance) GenPasswordConfig() *Config { + return i.Operation.GenPasswordConfig() +} + +// we can generate a config by `i.GenPasswordConfig()` +func (i *Instance) ReadPasswordWithConfig(cfg *Config) ([]byte, error) { + return i.Operation.PasswordWithConfig(cfg) +} + +func (i *Instance) ReadPasswordEx(prompt string, l Listener) ([]byte, error) { + return i.Operation.PasswordEx(prompt, l) +} + +func (i *Instance) ReadPassword(prompt string) ([]byte, error) { + return i.Operation.Password(prompt) +} + +type Result struct { + Line string + Error error +} + +func (l *Result) CanContinue() bool { + return len(l.Line) != 0 && l.Error == ErrInterrupt +} + +func (l *Result) CanBreak() bool { + return !l.CanContinue() && l.Error != nil +} + +func (i *Instance) Line() *Result { + ret, err := i.Readline() + return &Result{ret, err} +} + +// err is one of (nil, io.EOF, readline.ErrInterrupt) +func (i *Instance) Readline() (string, error) { + return i.Operation.String() +} + +func (i *Instance) ReadlineWithDefault(what string) (string, error) { + i.Operation.SetBuffer(what) + return i.Operation.String() +} + +func (i *Instance) SaveHistory(content string) error { + return i.Operation.SaveHistory(content) +} + +// same as readline +func (i *Instance) ReadSlice() ([]byte, error) { + return i.Operation.Slice() +} + +// we must make sure that call Close() before process exit. 
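+//
+// A minimal usage sketch (mirroring the package example above):
+//
+//	rl, err := readline.New("> ")
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer rl.Close()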
+func (i *Instance) Close() error { + if err := i.Terminal.Close(); err != nil { + return err + } + i.Config.Stdin.Close() + i.Operation.Close() + return nil +} +func (i *Instance) Clean() { + i.Operation.Clean() +} + +func (i *Instance) Write(b []byte) (int, error) { + return i.Stdout().Write(b) +} + +// WriteStdin prefill the next Stdin fetch +// Next time you call ReadLine() this value will be writen before the user input +// ie : +// i := readline.New() +// i.WriteStdin([]byte("test")) +// _, _= i.Readline() +// +// gives +// +// > test[cursor] +func (i *Instance) WriteStdin(val []byte) (int, error) { + return i.Terminal.WriteStdin(val) +} + +func (i *Instance) SetConfig(cfg *Config) *Config { + if i.Config == cfg { + return cfg + } + old := i.Config + i.Config = cfg + i.Operation.SetConfig(cfg) + i.Terminal.SetConfig(cfg) + return old +} + +func (i *Instance) Refresh() { + i.Operation.Refresh() +} + +// HistoryDisable the save of the commands into the history +func (i *Instance) HistoryDisable() { + i.Operation.history.Disable() +} + +// HistoryEnable the save of the commands into the history (default on) +func (i *Instance) HistoryEnable() { + i.Operation.history.Enable() +} diff --git a/vendor/github.com/chzyer/readline/remote.go b/vendor/github.com/chzyer/readline/remote.go new file mode 100644 index 00000000000..74dbf569022 --- /dev/null +++ b/vendor/github.com/chzyer/readline/remote.go @@ -0,0 +1,475 @@ +package readline + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "net" + "os" + "sync" + "sync/atomic" +) + +type MsgType int16 + +const ( + T_DATA = MsgType(iota) + T_WIDTH + T_WIDTH_REPORT + T_ISTTY_REPORT + T_RAW + T_ERAW // exit raw + T_EOF +) + +type RemoteSvr struct { + eof int32 + closed int32 + width int32 + reciveChan chan struct{} + writeChan chan *writeCtx + conn net.Conn + isTerminal bool + funcWidthChan func() + stopChan chan struct{} + + dataBufM sync.Mutex + dataBuf bytes.Buffer +} + +type writeReply struct { + n int + err error +} + +type writeCtx struct { + msg *Message + reply chan *writeReply +} + +func newWriteCtx(msg *Message) *writeCtx { + return &writeCtx{ + msg: msg, + reply: make(chan *writeReply), + } +} + +func NewRemoteSvr(conn net.Conn) (*RemoteSvr, error) { + rs := &RemoteSvr{ + width: -1, + conn: conn, + writeChan: make(chan *writeCtx), + reciveChan: make(chan struct{}), + stopChan: make(chan struct{}), + } + buf := bufio.NewReader(rs.conn) + + if err := rs.init(buf); err != nil { + return nil, err + } + + go rs.readLoop(buf) + go rs.writeLoop() + return rs, nil +} + +func (r *RemoteSvr) init(buf *bufio.Reader) error { + m, err := ReadMessage(buf) + if err != nil { + return err + } + // receive isTerminal + if m.Type != T_ISTTY_REPORT { + return fmt.Errorf("unexpected init message") + } + r.GotIsTerminal(m.Data) + + // receive width + m, err = ReadMessage(buf) + if err != nil { + return err + } + if m.Type != T_WIDTH_REPORT { + return fmt.Errorf("unexpected init message") + } + r.GotReportWidth(m.Data) + + return nil +} + +func (r *RemoteSvr) HandleConfig(cfg *Config) { + cfg.Stderr = r + cfg.Stdout = r + cfg.Stdin = r + cfg.FuncExitRaw = r.ExitRawMode + cfg.FuncIsTerminal = r.IsTerminal + cfg.FuncMakeRaw = r.EnterRawMode + cfg.FuncExitRaw = r.ExitRawMode + cfg.FuncGetWidth = r.GetWidth + cfg.FuncOnWidthChanged = func(f func()) { + r.funcWidthChan = f + } +} + +func (r *RemoteSvr) IsTerminal() bool { + return r.isTerminal +} + +func (r *RemoteSvr) checkEOF() error { + if atomic.LoadInt32(&r.eof) == 1 { + return io.EOF + } + 
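	// EOF has not been flagged yet; Read may block waiting for more data
+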
return nil +} + +func (r *RemoteSvr) Read(b []byte) (int, error) { + r.dataBufM.Lock() + n, err := r.dataBuf.Read(b) + r.dataBufM.Unlock() + if n == 0 { + if err := r.checkEOF(); err != nil { + return 0, err + } + } + + if n == 0 && err == io.EOF { + <-r.reciveChan + r.dataBufM.Lock() + n, err = r.dataBuf.Read(b) + r.dataBufM.Unlock() + } + if n == 0 { + if err := r.checkEOF(); err != nil { + return 0, err + } + } + + return n, err +} + +func (r *RemoteSvr) writeMsg(m *Message) error { + ctx := newWriteCtx(m) + r.writeChan <- ctx + reply := <-ctx.reply + return reply.err +} + +func (r *RemoteSvr) Write(b []byte) (int, error) { + ctx := newWriteCtx(NewMessage(T_DATA, b)) + r.writeChan <- ctx + reply := <-ctx.reply + return reply.n, reply.err +} + +func (r *RemoteSvr) EnterRawMode() error { + return r.writeMsg(NewMessage(T_RAW, nil)) +} + +func (r *RemoteSvr) ExitRawMode() error { + return r.writeMsg(NewMessage(T_ERAW, nil)) +} + +func (r *RemoteSvr) writeLoop() { + defer r.Close() + +loop: + for { + select { + case ctx, ok := <-r.writeChan: + if !ok { + break + } + n, err := ctx.msg.WriteTo(r.conn) + ctx.reply <- &writeReply{n, err} + case <-r.stopChan: + break loop + } + } +} + +func (r *RemoteSvr) Close() error { + if atomic.CompareAndSwapInt32(&r.closed, 0, 1) { + close(r.stopChan) + r.conn.Close() + } + return nil +} + +func (r *RemoteSvr) readLoop(buf *bufio.Reader) { + defer r.Close() + for { + m, err := ReadMessage(buf) + if err != nil { + break + } + switch m.Type { + case T_EOF: + atomic.StoreInt32(&r.eof, 1) + select { + case r.reciveChan <- struct{}{}: + default: + } + case T_DATA: + r.dataBufM.Lock() + r.dataBuf.Write(m.Data) + r.dataBufM.Unlock() + select { + case r.reciveChan <- struct{}{}: + default: + } + case T_WIDTH_REPORT: + r.GotReportWidth(m.Data) + case T_ISTTY_REPORT: + r.GotIsTerminal(m.Data) + } + } +} + +func (r *RemoteSvr) GotIsTerminal(data []byte) { + if binary.BigEndian.Uint16(data) == 0 { + r.isTerminal = false + } else { + r.isTerminal = true + } +} + +func (r *RemoteSvr) GotReportWidth(data []byte) { + atomic.StoreInt32(&r.width, int32(binary.BigEndian.Uint16(data))) + if r.funcWidthChan != nil { + r.funcWidthChan() + } +} + +func (r *RemoteSvr) GetWidth() int { + return int(atomic.LoadInt32(&r.width)) +} + +// ----------------------------------------------------------------------------- + +type Message struct { + Type MsgType + Data []byte +} + +func ReadMessage(r io.Reader) (*Message, error) { + m := new(Message) + var length int32 + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + if err := binary.Read(r, binary.BigEndian, &m.Type); err != nil { + return nil, err + } + m.Data = make([]byte, int(length)-2) + if _, err := io.ReadFull(r, m.Data); err != nil { + return nil, err + } + return m, nil +} + +func NewMessage(t MsgType, data []byte) *Message { + return &Message{t, data} +} + +func (m *Message) WriteTo(w io.Writer) (int, error) { + buf := bytes.NewBuffer(make([]byte, 0, len(m.Data)+2+4)) + binary.Write(buf, binary.BigEndian, int32(len(m.Data)+2)) + binary.Write(buf, binary.BigEndian, m.Type) + buf.Write(m.Data) + n, err := buf.WriteTo(w) + return int(n), err +} + +// ----------------------------------------------------------------------------- + +type RemoteCli struct { + conn net.Conn + raw RawMode + receiveChan chan struct{} + inited int32 + isTerminal *bool + + data bytes.Buffer + dataM sync.Mutex +} + +func NewRemoteCli(conn net.Conn) (*RemoteCli, error) { + r := &RemoteCli{ + conn: conn, + receiveChan: 
make(chan struct{}), + } + return r, nil +} + +func (r *RemoteCli) MarkIsTerminal(is bool) { + r.isTerminal = &is +} + +func (r *RemoteCli) init() error { + if !atomic.CompareAndSwapInt32(&r.inited, 0, 1) { + return nil + } + + if err := r.reportIsTerminal(); err != nil { + return err + } + + if err := r.reportWidth(); err != nil { + return err + } + + // register sig for width changed + DefaultOnWidthChanged(func() { + r.reportWidth() + }) + return nil +} + +func (r *RemoteCli) writeMsg(m *Message) error { + r.dataM.Lock() + _, err := m.WriteTo(r.conn) + r.dataM.Unlock() + return err +} + +func (r *RemoteCli) Write(b []byte) (int, error) { + m := NewMessage(T_DATA, b) + r.dataM.Lock() + _, err := m.WriteTo(r.conn) + r.dataM.Unlock() + return len(b), err +} + +func (r *RemoteCli) reportWidth() error { + screenWidth := GetScreenWidth() + data := make([]byte, 2) + binary.BigEndian.PutUint16(data, uint16(screenWidth)) + msg := NewMessage(T_WIDTH_REPORT, data) + + if err := r.writeMsg(msg); err != nil { + return err + } + return nil +} + +func (r *RemoteCli) reportIsTerminal() error { + var isTerminal bool + if r.isTerminal != nil { + isTerminal = *r.isTerminal + } else { + isTerminal = DefaultIsTerminal() + } + data := make([]byte, 2) + if isTerminal { + binary.BigEndian.PutUint16(data, 1) + } else { + binary.BigEndian.PutUint16(data, 0) + } + msg := NewMessage(T_ISTTY_REPORT, data) + if err := r.writeMsg(msg); err != nil { + return err + } + return nil +} + +func (r *RemoteCli) readLoop() { + buf := bufio.NewReader(r.conn) + for { + msg, err := ReadMessage(buf) + if err != nil { + break + } + switch msg.Type { + case T_ERAW: + r.raw.Exit() + case T_RAW: + r.raw.Enter() + case T_DATA: + os.Stdout.Write(msg.Data) + } + } +} + +func (r *RemoteCli) ServeBy(source io.Reader) error { + if err := r.init(); err != nil { + return err + } + + go func() { + defer r.Close() + for { + n, _ := io.Copy(r, source) + if n == 0 { + break + } + } + }() + defer r.raw.Exit() + r.readLoop() + return nil +} + +func (r *RemoteCli) Close() { + r.writeMsg(NewMessage(T_EOF, nil)) +} + +func (r *RemoteCli) Serve() error { + return r.ServeBy(os.Stdin) +} + +func ListenRemote(n, addr string, cfg *Config, h func(*Instance), onListen ...func(net.Listener) error) error { + ln, err := net.Listen(n, addr) + if err != nil { + return err + } + if len(onListen) > 0 { + if err := onListen[0](ln); err != nil { + return err + } + } + for { + conn, err := ln.Accept() + if err != nil { + break + } + go func() { + defer conn.Close() + rl, err := HandleConn(*cfg, conn) + if err != nil { + return + } + h(rl) + }() + } + return nil +} + +func HandleConn(cfg Config, conn net.Conn) (*Instance, error) { + r, err := NewRemoteSvr(conn) + if err != nil { + return nil, err + } + r.HandleConfig(&cfg) + + rl, err := NewEx(&cfg) + if err != nil { + return nil, err + } + return rl, nil +} + +func DialRemote(n, addr string) error { + conn, err := net.Dial(n, addr) + if err != nil { + return err + } + defer conn.Close() + + cli, err := NewRemoteCli(conn) + if err != nil { + return err + } + return cli.Serve() +} diff --git a/vendor/github.com/chzyer/readline/runebuf.go b/vendor/github.com/chzyer/readline/runebuf.go new file mode 100644 index 00000000000..81d2da50ccb --- /dev/null +++ b/vendor/github.com/chzyer/readline/runebuf.go @@ -0,0 +1,629 @@ +package readline + +import ( + "bufio" + "bytes" + "io" + "strconv" + "strings" + "sync" +) + +type runeBufferBck struct { + buf []rune + idx int +} + +type RuneBuffer struct { + buf []rune + idx int + 
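	// prompt is stored as runes so its on-screen width can be measured
+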
prompt []rune + w io.Writer + + hadClean bool + interactive bool + cfg *Config + + width int + + bck *runeBufferBck + + offset string + + lastKill []rune + + sync.Mutex +} + +func (r* RuneBuffer) pushKill(text []rune) { + r.lastKill = append([]rune{}, text...) +} + +func (r *RuneBuffer) OnWidthChange(newWidth int) { + r.Lock() + r.width = newWidth + r.Unlock() +} + +func (r *RuneBuffer) Backup() { + r.Lock() + r.bck = &runeBufferBck{r.buf, r.idx} + r.Unlock() +} + +func (r *RuneBuffer) Restore() { + r.Refresh(func() { + if r.bck == nil { + return + } + r.buf = r.bck.buf + r.idx = r.bck.idx + }) +} + +func NewRuneBuffer(w io.Writer, prompt string, cfg *Config, width int) *RuneBuffer { + rb := &RuneBuffer{ + w: w, + interactive: cfg.useInteractive(), + cfg: cfg, + width: width, + } + rb.SetPrompt(prompt) + return rb +} + +func (r *RuneBuffer) SetConfig(cfg *Config) { + r.Lock() + r.cfg = cfg + r.interactive = cfg.useInteractive() + r.Unlock() +} + +func (r *RuneBuffer) SetMask(m rune) { + r.Lock() + r.cfg.MaskRune = m + r.Unlock() +} + +func (r *RuneBuffer) CurrentWidth(x int) int { + r.Lock() + defer r.Unlock() + return runes.WidthAll(r.buf[:x]) +} + +func (r *RuneBuffer) PromptLen() int { + r.Lock() + width := r.promptLen() + r.Unlock() + return width +} + +func (r *RuneBuffer) promptLen() int { + return runes.WidthAll(runes.ColorFilter(r.prompt)) +} + +func (r *RuneBuffer) RuneSlice(i int) []rune { + r.Lock() + defer r.Unlock() + + if i > 0 { + rs := make([]rune, i) + copy(rs, r.buf[r.idx:r.idx+i]) + return rs + } + rs := make([]rune, -i) + copy(rs, r.buf[r.idx+i:r.idx]) + return rs +} + +func (r *RuneBuffer) Runes() []rune { + r.Lock() + newr := make([]rune, len(r.buf)) + copy(newr, r.buf) + r.Unlock() + return newr +} + +func (r *RuneBuffer) Pos() int { + r.Lock() + defer r.Unlock() + return r.idx +} + +func (r *RuneBuffer) Len() int { + r.Lock() + defer r.Unlock() + return len(r.buf) +} + +func (r *RuneBuffer) MoveToLineStart() { + r.Refresh(func() { + if r.idx == 0 { + return + } + r.idx = 0 + }) +} + +func (r *RuneBuffer) MoveBackward() { + r.Refresh(func() { + if r.idx == 0 { + return + } + r.idx-- + }) +} + +func (r *RuneBuffer) WriteString(s string) { + r.WriteRunes([]rune(s)) +} + +func (r *RuneBuffer) WriteRune(s rune) { + r.WriteRunes([]rune{s}) +} + +func (r *RuneBuffer) WriteRunes(s []rune) { + r.Refresh(func() { + tail := append(s, r.buf[r.idx:]...) + r.buf = append(r.buf[:r.idx], tail...) + r.idx += len(s) + }) +} + +func (r *RuneBuffer) MoveForward() { + r.Refresh(func() { + if r.idx == len(r.buf) { + return + } + r.idx++ + }) +} + +func (r *RuneBuffer) IsCursorInEnd() bool { + r.Lock() + defer r.Unlock() + return r.idx == len(r.buf) +} + +func (r *RuneBuffer) Replace(ch rune) { + r.Refresh(func() { + r.buf[r.idx] = ch + }) +} + +func (r *RuneBuffer) Erase() { + r.Refresh(func() { + r.idx = 0 + r.pushKill(r.buf[:]) + r.buf = r.buf[:0] + }) +} + +func (r *RuneBuffer) Delete() (success bool) { + r.Refresh(func() { + if r.idx == len(r.buf) { + return + } + r.pushKill(r.buf[r.idx : r.idx+1]) + r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) + success = true + }) + return +} + +func (r *RuneBuffer) DeleteWord() { + if r.idx == len(r.buf) { + return + } + init := r.idx + for init < len(r.buf) && IsWordBreak(r.buf[init]) { + init++ + } + for i := init + 1; i < len(r.buf); i++ { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.pushKill(r.buf[r.idx:i-1]) + r.Refresh(func() { + r.buf = append(r.buf[:r.idx], r.buf[i-1:]...) 
+ }) + return + } + } + r.Kill() +} + +func (r *RuneBuffer) MoveToPrevWord() (success bool) { + r.Refresh(func() { + if r.idx == 0 { + return + } + + for i := r.idx - 1; i > 0; i-- { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.idx = i + success = true + return + } + } + r.idx = 0 + success = true + }) + return +} + +func (r *RuneBuffer) KillFront() { + r.Refresh(func() { + if r.idx == 0 { + return + } + + length := len(r.buf) - r.idx + r.pushKill(r.buf[:r.idx]) + copy(r.buf[:length], r.buf[r.idx:]) + r.idx = 0 + r.buf = r.buf[:length] + }) +} + +func (r *RuneBuffer) Kill() { + r.Refresh(func() { + r.pushKill(r.buf[r.idx:]) + r.buf = r.buf[:r.idx] + }) +} + +func (r *RuneBuffer) Transpose() { + r.Refresh(func() { + if len(r.buf) == 1 { + r.idx++ + } + + if len(r.buf) < 2 { + return + } + + if r.idx == 0 { + r.idx = 1 + } else if r.idx >= len(r.buf) { + r.idx = len(r.buf) - 1 + } + r.buf[r.idx], r.buf[r.idx-1] = r.buf[r.idx-1], r.buf[r.idx] + r.idx++ + }) +} + +func (r *RuneBuffer) MoveToNextWord() { + r.Refresh(func() { + for i := r.idx + 1; i < len(r.buf); i++ { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.idx = i + return + } + } + + r.idx = len(r.buf) + }) +} + +func (r *RuneBuffer) MoveToEndWord() { + r.Refresh(func() { + // already at the end, so do nothing + if r.idx == len(r.buf) { + return + } + // if we are at the end of a word already, go to next + if !IsWordBreak(r.buf[r.idx]) && IsWordBreak(r.buf[r.idx+1]) { + r.idx++ + } + + // keep going until at the end of a word + for i := r.idx + 1; i < len(r.buf); i++ { + if IsWordBreak(r.buf[i]) && !IsWordBreak(r.buf[i-1]) { + r.idx = i - 1 + return + } + } + r.idx = len(r.buf) + }) +} + +func (r *RuneBuffer) BackEscapeWord() { + r.Refresh(func() { + if r.idx == 0 { + return + } + for i := r.idx - 1; i > 0; i-- { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.pushKill(r.buf[i:r.idx]) + r.buf = append(r.buf[:i], r.buf[r.idx:]...) + r.idx = i + return + } + } + + r.buf = r.buf[:0] + r.idx = 0 + }) +} + +func (r *RuneBuffer) Yank() { + if len(r.lastKill) == 0 { + return + } + r.Refresh(func() { + buf := make([]rune, 0, len(r.buf) + len(r.lastKill)) + buf = append(buf, r.buf[:r.idx]...) + buf = append(buf, r.lastKill...) + buf = append(buf, r.buf[r.idx:]...) + r.buf = buf + r.idx += len(r.lastKill) + }) +} + +func (r *RuneBuffer) Backspace() { + r.Refresh(func() { + if r.idx == 0 { + return + } + + r.idx-- + r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) 
+ }) +} + +func (r *RuneBuffer) MoveToLineEnd() { + r.Refresh(func() { + if r.idx == len(r.buf) { + return + } + + r.idx = len(r.buf) + }) +} + +func (r *RuneBuffer) LineCount(width int) int { + if width == -1 { + width = r.width + } + return LineCount(width, + runes.WidthAll(r.buf)+r.PromptLen()) +} + +func (r *RuneBuffer) MoveTo(ch rune, prevChar, reverse bool) (success bool) { + r.Refresh(func() { + if reverse { + for i := r.idx - 1; i >= 0; i-- { + if r.buf[i] == ch { + r.idx = i + if prevChar { + r.idx++ + } + success = true + return + } + } + return + } + for i := r.idx + 1; i < len(r.buf); i++ { + if r.buf[i] == ch { + r.idx = i + if prevChar { + r.idx-- + } + success = true + return + } + } + }) + return +} + +func (r *RuneBuffer) isInLineEdge() bool { + if isWindows { + return false + } + sp := r.getSplitByLine(r.buf) + return len(sp[len(sp)-1]) == 0 +} + +func (r *RuneBuffer) getSplitByLine(rs []rune) []string { + return SplitByLine(r.promptLen(), r.width, rs) +} + +func (r *RuneBuffer) IdxLine(width int) int { + r.Lock() + defer r.Unlock() + return r.idxLine(width) +} + +func (r *RuneBuffer) idxLine(width int) int { + if width == 0 { + return 0 + } + sp := r.getSplitByLine(r.buf[:r.idx]) + return len(sp) - 1 +} + +func (r *RuneBuffer) CursorLineCount() int { + return r.LineCount(r.width) - r.IdxLine(r.width) +} + +func (r *RuneBuffer) Refresh(f func()) { + r.Lock() + defer r.Unlock() + + if !r.interactive { + if f != nil { + f() + } + return + } + + r.clean() + if f != nil { + f() + } + r.print() +} + +func (r *RuneBuffer) SetOffset(offset string) { + r.Lock() + r.offset = offset + r.Unlock() +} + +func (r *RuneBuffer) print() { + r.w.Write(r.output()) + r.hadClean = false +} + +func (r *RuneBuffer) output() []byte { + buf := bytes.NewBuffer(nil) + buf.WriteString(string(r.prompt)) + if r.cfg.EnableMask && len(r.buf) > 0 { + buf.Write([]byte(strings.Repeat(string(r.cfg.MaskRune), len(r.buf)-1))) + if r.buf[len(r.buf)-1] == '\n' { + buf.Write([]byte{'\n'}) + } else { + buf.Write([]byte(string(r.cfg.MaskRune))) + } + if len(r.buf) > r.idx { + buf.Write(r.getBackspaceSequence()) + } + + } else { + for _, e := range r.cfg.Painter.Paint(r.buf, r.idx) { + if e == '\t' { + buf.WriteString(strings.Repeat(" ", TabWidth)) + } else { + buf.WriteRune(e) + } + } + if r.isInLineEdge() { + buf.Write([]byte(" \b")) + } + } + // cursor position + if len(r.buf) > r.idx { + buf.Write(r.getBackspaceSequence()) + } + return buf.Bytes() +} + +func (r *RuneBuffer) getBackspaceSequence() []byte { + var sep = map[int]bool{} + + var i int + for { + if i >= runes.WidthAll(r.buf) { + break + } + + if i == 0 { + i -= r.promptLen() + } + i += r.width + + sep[i] = true + } + var buf []byte + for i := len(r.buf); i > r.idx; i-- { + // move input to the left of one + buf = append(buf, '\b') + if sep[i] { + // up one line, go to the start of the line and move cursor right to the end (r.width) + buf = append(buf, "\033[A\r"+"\033["+strconv.Itoa(r.width)+"C"...) 
+ } + } + + return buf + +} + +func (r *RuneBuffer) Reset() []rune { + ret := runes.Copy(r.buf) + r.buf = r.buf[:0] + r.idx = 0 + return ret +} + +func (r *RuneBuffer) calWidth(m int) int { + if m > 0 { + return runes.WidthAll(r.buf[r.idx : r.idx+m]) + } + return runes.WidthAll(r.buf[r.idx+m : r.idx]) +} + +func (r *RuneBuffer) SetStyle(start, end int, style string) { + if end < start { + panic("end < start") + } + + // goto start + move := start - r.idx + if move > 0 { + r.w.Write([]byte(string(r.buf[r.idx : r.idx+move]))) + } else { + r.w.Write(bytes.Repeat([]byte("\b"), r.calWidth(move))) + } + r.w.Write([]byte("\033[" + style + "m")) + r.w.Write([]byte(string(r.buf[start:end]))) + r.w.Write([]byte("\033[0m")) + // TODO: move back +} + +func (r *RuneBuffer) SetWithIdx(idx int, buf []rune) { + r.Refresh(func() { + r.buf = buf + r.idx = idx + }) +} + +func (r *RuneBuffer) Set(buf []rune) { + r.SetWithIdx(len(buf), buf) +} + +func (r *RuneBuffer) SetPrompt(prompt string) { + r.Lock() + r.prompt = []rune(prompt) + r.Unlock() +} + +func (r *RuneBuffer) cleanOutput(w io.Writer, idxLine int) { + buf := bufio.NewWriter(w) + + if r.width == 0 { + buf.WriteString(strings.Repeat("\r\b", len(r.buf)+r.promptLen())) + buf.Write([]byte("\033[J")) + } else { + buf.Write([]byte("\033[J")) // just like ^k :) + if idxLine == 0 { + buf.WriteString("\033[2K") + buf.WriteString("\r") + } else { + for i := 0; i < idxLine; i++ { + io.WriteString(buf, "\033[2K\r\033[A") + } + io.WriteString(buf, "\033[2K\r") + } + } + buf.Flush() + return +} + +func (r *RuneBuffer) Clean() { + r.Lock() + r.clean() + r.Unlock() +} + +func (r *RuneBuffer) clean() { + r.cleanWithIdxLine(r.idxLine(r.width)) +} + +func (r *RuneBuffer) cleanWithIdxLine(idxLine int) { + if r.hadClean || !r.interactive { + return + } + r.hadClean = true + r.cleanOutput(r.w, idxLine) +} diff --git a/vendor/github.com/chzyer/readline/runes.go b/vendor/github.com/chzyer/readline/runes.go new file mode 100644 index 00000000000..a669bc48c30 --- /dev/null +++ b/vendor/github.com/chzyer/readline/runes.go @@ -0,0 +1,223 @@ +package readline + +import ( + "bytes" + "unicode" + "unicode/utf8" +) + +var runes = Runes{} +var TabWidth = 4 + +type Runes struct{} + +func (Runes) EqualRune(a, b rune, fold bool) bool { + if a == b { + return true + } + if !fold { + return false + } + if a > b { + a, b = b, a + } + if b < utf8.RuneSelf && 'A' <= a && a <= 'Z' { + if b == a+'a'-'A' { + return true + } + } + return false +} + +func (r Runes) EqualRuneFold(a, b rune) bool { + return r.EqualRune(a, b, true) +} + +func (r Runes) EqualFold(a, b []rune) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if r.EqualRuneFold(a[i], b[i]) { + continue + } + return false + } + + return true +} + +func (Runes) Equal(a, b []rune) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +func (rs Runes) IndexAllBckEx(r, sub []rune, fold bool) int { + for i := len(r) - len(sub); i >= 0; i-- { + found := true + for j := 0; j < len(sub); j++ { + if !rs.EqualRune(r[i+j], sub[j], fold) { + found = false + break + } + } + if found { + return i + } + } + return -1 +} + +// Search in runes from end to front +func (rs Runes) IndexAllBck(r, sub []rune) int { + return rs.IndexAllBckEx(r, sub, false) +} + +// Search in runes from front to end +func (rs Runes) IndexAll(r, sub []rune) int { + return rs.IndexAllEx(r, sub, false) +} + +func (rs Runes) IndexAllEx(r, sub []rune, fold 
bool) int { + for i := 0; i < len(r); i++ { + found := true + if len(r[i:]) < len(sub) { + return -1 + } + for j := 0; j < len(sub); j++ { + if !rs.EqualRune(r[i+j], sub[j], fold) { + found = false + break + } + } + if found { + return i + } + } + return -1 +} + +func (Runes) Index(r rune, rs []rune) int { + for i := 0; i < len(rs); i++ { + if rs[i] == r { + return i + } + } + return -1 +} + +func (Runes) ColorFilter(r []rune) []rune { + newr := make([]rune, 0, len(r)) + for pos := 0; pos < len(r); pos++ { + if r[pos] == '\033' && r[pos+1] == '[' { + idx := runes.Index('m', r[pos+2:]) + if idx == -1 { + continue + } + pos += idx + 2 + continue + } + newr = append(newr, r[pos]) + } + return newr +} + +var zeroWidth = []*unicode.RangeTable{ + unicode.Mn, + unicode.Me, + unicode.Cc, + unicode.Cf, +} + +var doubleWidth = []*unicode.RangeTable{ + unicode.Han, + unicode.Hangul, + unicode.Hiragana, + unicode.Katakana, +} + +func (Runes) Width(r rune) int { + if r == '\t' { + return TabWidth + } + if unicode.IsOneOf(zeroWidth, r) { + return 0 + } + if unicode.IsOneOf(doubleWidth, r) { + return 2 + } + return 1 +} + +func (Runes) WidthAll(r []rune) (length int) { + for i := 0; i < len(r); i++ { + length += runes.Width(r[i]) + } + return +} + +func (Runes) Backspace(r []rune) []byte { + return bytes.Repeat([]byte{'\b'}, runes.WidthAll(r)) +} + +func (Runes) Copy(r []rune) []rune { + n := make([]rune, len(r)) + copy(n, r) + return n +} + +func (Runes) HasPrefixFold(r, prefix []rune) bool { + if len(r) < len(prefix) { + return false + } + return runes.EqualFold(r[:len(prefix)], prefix) +} + +func (Runes) HasPrefix(r, prefix []rune) bool { + if len(r) < len(prefix) { + return false + } + return runes.Equal(r[:len(prefix)], prefix) +} + +func (Runes) Aggregate(candicate [][]rune) (same []rune, size int) { + for i := 0; i < len(candicate[0]); i++ { + for j := 0; j < len(candicate)-1; j++ { + if i >= len(candicate[j]) || i >= len(candicate[j+1]) { + goto aggregate + } + if candicate[j][i] != candicate[j+1][i] { + goto aggregate + } + } + size = i + 1 + } +aggregate: + if size > 0 { + same = runes.Copy(candicate[0][:size]) + for i := 0; i < len(candicate); i++ { + n := runes.Copy(candicate[i]) + copy(n, n[size:]) + candicate[i] = n[:len(n)-size] + } + } + return +} + +func (Runes) TrimSpaceLeft(in []rune) []rune { + firstIndex := len(in) + for i, r := range in { + if unicode.IsSpace(r) == false { + firstIndex = i + break + } + } + return in[firstIndex:] +} diff --git a/vendor/github.com/chzyer/readline/search.go b/vendor/github.com/chzyer/readline/search.go new file mode 100644 index 00000000000..52e8ff09953 --- /dev/null +++ b/vendor/github.com/chzyer/readline/search.go @@ -0,0 +1,164 @@ +package readline + +import ( + "bytes" + "container/list" + "fmt" + "io" +) + +const ( + S_STATE_FOUND = iota + S_STATE_FAILING +) + +const ( + S_DIR_BCK = iota + S_DIR_FWD +) + +type opSearch struct { + inMode bool + state int + dir int + source *list.Element + w io.Writer + buf *RuneBuffer + data []rune + history *opHistory + cfg *Config + markStart int + markEnd int + width int +} + +func newOpSearch(w io.Writer, buf *RuneBuffer, history *opHistory, cfg *Config, width int) *opSearch { + return &opSearch{ + w: w, + buf: buf, + cfg: cfg, + history: history, + width: width, + } +} + +func (o *opSearch) OnWidthChange(newWidth int) { + o.width = newWidth +} + +func (o *opSearch) IsSearchMode() bool { + return o.inMode +} + +func (o *opSearch) SearchBackspace() { + if len(o.data) > 0 { + o.data = o.data[:len(o.data)-1] + 
o.search(true) + } +} + +func (o *opSearch) findHistoryBy(isNewSearch bool) (int, *list.Element) { + if o.dir == S_DIR_BCK { + return o.history.FindBck(isNewSearch, o.data, o.buf.idx) + } + return o.history.FindFwd(isNewSearch, o.data, o.buf.idx) +} + +func (o *opSearch) search(isChange bool) bool { + if len(o.data) == 0 { + o.state = S_STATE_FOUND + o.SearchRefresh(-1) + return true + } + idx, elem := o.findHistoryBy(isChange) + if elem == nil { + o.SearchRefresh(-2) + return false + } + o.history.current = elem + + item := o.history.showItem(o.history.current.Value) + start, end := 0, 0 + if o.dir == S_DIR_BCK { + start, end = idx, idx+len(o.data) + } else { + start, end = idx, idx+len(o.data) + idx += len(o.data) + } + o.buf.SetWithIdx(idx, item) + o.markStart, o.markEnd = start, end + o.SearchRefresh(idx) + return true +} + +func (o *opSearch) SearchChar(r rune) { + o.data = append(o.data, r) + o.search(true) +} + +func (o *opSearch) SearchMode(dir int) bool { + if o.width == 0 { + return false + } + alreadyInMode := o.inMode + o.inMode = true + o.dir = dir + o.source = o.history.current + if alreadyInMode { + o.search(false) + } else { + o.SearchRefresh(-1) + } + return true +} + +func (o *opSearch) ExitSearchMode(revert bool) { + if revert { + o.history.current = o.source + o.buf.Set(o.history.showItem(o.history.current.Value)) + } + o.markStart, o.markEnd = 0, 0 + o.state = S_STATE_FOUND + o.inMode = false + o.source = nil + o.data = nil +} + +func (o *opSearch) SearchRefresh(x int) { + if x == -2 { + o.state = S_STATE_FAILING + } else if x >= 0 { + o.state = S_STATE_FOUND + } + if x < 0 { + x = o.buf.idx + } + x = o.buf.CurrentWidth(x) + x += o.buf.PromptLen() + x = x % o.width + + if o.markStart > 0 { + o.buf.SetStyle(o.markStart, o.markEnd, "4") + } + + lineCnt := o.buf.CursorLineCount() + buf := bytes.NewBuffer(nil) + buf.Write(bytes.Repeat([]byte("\n"), lineCnt)) + buf.WriteString("\033[J") + if o.state == S_STATE_FAILING { + buf.WriteString("failing ") + } + if o.dir == S_DIR_BCK { + buf.WriteString("bck") + } else if o.dir == S_DIR_FWD { + buf.WriteString("fwd") + } + buf.WriteString("-i-search: ") + buf.WriteString(string(o.data)) // keyword + buf.WriteString("\033[4m \033[0m") // _ + fmt.Fprintf(buf, "\r\033[%dA", lineCnt) // move prev + if x > 0 { + fmt.Fprintf(buf, "\033[%dC", x) // move forward + } + o.w.Write(buf.Bytes()) +} diff --git a/vendor/github.com/chzyer/readline/std.go b/vendor/github.com/chzyer/readline/std.go new file mode 100644 index 00000000000..61d44b75974 --- /dev/null +++ b/vendor/github.com/chzyer/readline/std.go @@ -0,0 +1,197 @@ +package readline + +import ( + "io" + "os" + "sync" + "sync/atomic" +) + +var ( + Stdin io.ReadCloser = os.Stdin + Stdout io.WriteCloser = os.Stdout + Stderr io.WriteCloser = os.Stderr +) + +var ( + std *Instance + stdOnce sync.Once +) + +// global instance will not submit history automatic +func getInstance() *Instance { + stdOnce.Do(func() { + std, _ = NewEx(&Config{ + DisableAutoSaveHistory: true, + }) + }) + return std +} + +// let readline load history from filepath +// and try to persist history into disk +// set fp to "" to prevent readline persisting history to disk +// so the `AddHistory` will return nil error forever. 
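+//
+// A minimal illustrative use (the path below is only an example):
+//
+//	SetHistoryPath("/tmp/readline.history")
+//	line, _ := Line("> ")
+//	_ = AddHistory(line)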
+func SetHistoryPath(fp string) { + ins := getInstance() + cfg := ins.Config.Clone() + cfg.HistoryFile = fp + ins.SetConfig(cfg) +} + +// set auto completer to global instance +func SetAutoComplete(completer AutoCompleter) { + ins := getInstance() + cfg := ins.Config.Clone() + cfg.AutoComplete = completer + ins.SetConfig(cfg) +} + +// add history to global instance manually +// raise error only if `SetHistoryPath` is set with a non-empty path +func AddHistory(content string) error { + ins := getInstance() + return ins.SaveHistory(content) +} + +func Password(prompt string) ([]byte, error) { + ins := getInstance() + return ins.ReadPassword(prompt) +} + +// readline with global configs +func Line(prompt string) (string, error) { + ins := getInstance() + ins.SetPrompt(prompt) + return ins.Readline() +} + +type CancelableStdin struct { + r io.Reader + mutex sync.Mutex + stop chan struct{} + closed int32 + notify chan struct{} + data []byte + read int + err error +} + +func NewCancelableStdin(r io.Reader) *CancelableStdin { + c := &CancelableStdin{ + r: r, + notify: make(chan struct{}), + stop: make(chan struct{}), + } + go c.ioloop() + return c +} + +func (c *CancelableStdin) ioloop() { +loop: + for { + select { + case <-c.notify: + c.read, c.err = c.r.Read(c.data) + select { + case c.notify <- struct{}{}: + case <-c.stop: + break loop + } + case <-c.stop: + break loop + } + } +} + +func (c *CancelableStdin) Read(b []byte) (n int, err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if atomic.LoadInt32(&c.closed) == 1 { + return 0, io.EOF + } + + c.data = b + select { + case c.notify <- struct{}{}: + case <-c.stop: + return 0, io.EOF + } + select { + case <-c.notify: + return c.read, c.err + case <-c.stop: + return 0, io.EOF + } +} + +func (c *CancelableStdin) Close() error { + if atomic.CompareAndSwapInt32(&c.closed, 0, 1) { + close(c.stop) + } + return nil +} + +// FillableStdin is a stdin reader which can prepend some data before +// reading into the real stdin +type FillableStdin struct { + sync.Mutex + stdin io.Reader + stdinBuffer io.ReadCloser + buf []byte + bufErr error +} + +// NewFillableStdin gives you FillableStdin +func NewFillableStdin(stdin io.Reader) (io.ReadCloser, io.Writer) { + r, w := io.Pipe() + s := &FillableStdin{ + stdinBuffer: r, + stdin: stdin, + } + s.ioloop() + return s, w +} + +func (s *FillableStdin) ioloop() { + go func() { + for { + bufR := make([]byte, 100) + var n int + n, s.bufErr = s.stdinBuffer.Read(bufR) + if s.bufErr != nil { + if s.bufErr == io.ErrClosedPipe { + break + } + } + s.Lock() + s.buf = append(s.buf, bufR[:n]...) 
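+			// anything appended here is handed out by the next
+			// Read call before it falls through to the real stdin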
+ s.Unlock() + } + }() +} + +// Read will read from the local buffer and if no data, read from stdin +func (s *FillableStdin) Read(p []byte) (n int, err error) { + s.Lock() + i := len(s.buf) + if len(p) < i { + i = len(p) + } + if i > 0 { + n := copy(p, s.buf) + s.buf = s.buf[:0] + cerr := s.bufErr + s.bufErr = nil + s.Unlock() + return n, cerr + } + s.Unlock() + n, err = s.stdin.Read(p) + return n, err +} + +func (s *FillableStdin) Close() error { + s.stdinBuffer.Close() + return nil +} diff --git a/vendor/github.com/chzyer/readline/std_windows.go b/vendor/github.com/chzyer/readline/std_windows.go new file mode 100644 index 00000000000..b10f91bcb7e --- /dev/null +++ b/vendor/github.com/chzyer/readline/std_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package readline + +func init() { + Stdin = NewRawReader() + Stdout = NewANSIWriter(Stdout) + Stderr = NewANSIWriter(Stderr) +} diff --git a/vendor/github.com/chzyer/readline/term.go b/vendor/github.com/chzyer/readline/term.go new file mode 100644 index 00000000000..133993ca8ea --- /dev/null +++ b/vendor/github.com/chzyer/readline/term.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package readline + +import ( + "io" + "syscall" +) + +// State contains the state of a terminal. +type State struct { + termios Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := getTermios(fd) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var oldState State + + if termios, err := getTermios(fd); err != nil { + return nil, err + } else { + oldState.termios = *termios + } + + newState := oldState.termios + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. + newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON + // newState.Oflag &^= syscall.OPOST + newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN + newState.Cflag &^= syscall.CSIZE | syscall.PARENB + newState.Cflag |= syscall.CS8 + + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + return &oldState, setTermios(fd, &newState) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := getTermios(fd) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func restoreTerm(fd int, state *State) error { + return setTermios(fd, &state.termios) +} + +// ReadPassword reads a line of input from a terminal without local echo. 
This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + oldState, err := getTermios(fd) + if err != nil { + return nil, err + } + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + if err := setTermios(fd, newState); err != nil { + return nil, err + } + + defer func() { + setTermios(fd, oldState) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/github.com/chzyer/readline/term_bsd.go b/vendor/github.com/chzyer/readline/term_bsd.go new file mode 100644 index 00000000000..68b56ea6ba7 --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_bsd.go @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package readline + +import ( + "syscall" + "unsafe" +) + +func getTermios(fd int) (*Termios, error) { + termios := new(Termios) + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return nil, err + } + return termios, nil +} + +func setTermios(fd int, termios *Termios) error { + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/chzyer/readline/term_linux.go b/vendor/github.com/chzyer/readline/term_linux.go new file mode 100644 index 00000000000..e3392b4ac2d --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_linux.go @@ -0,0 +1,33 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package readline + +import ( + "syscall" + "unsafe" +) + +// These constants are declared here, rather than importing +// them from the syscall package as some syscall packages, even +// on linux, for example gccgo, do not declare them. +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS + +func getTermios(fd int) (*Termios, error) { + termios := new(Termios) + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return nil, err + } + return termios, nil +} + +func setTermios(fd int, termios *Termios) error { + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/chzyer/readline/term_solaris.go b/vendor/github.com/chzyer/readline/term_solaris.go new file mode 100644 index 00000000000..4c27273c7ab --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_solaris.go @@ -0,0 +1,32 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build solaris + +package readline + +import "golang.org/x/sys/unix" + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (int, int, error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return 0, 0, err + } + return int(ws.Col), int(ws.Row), nil +} + +type Termios unix.Termios + +func getTermios(fd int) (*Termios, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + return (*Termios)(termios), nil +} + +func setTermios(fd int, termios *Termios) error { + return unix.IoctlSetTermios(fd, unix.TCSETSF, (*unix.Termios)(termios)) +} diff --git a/vendor/github.com/chzyer/readline/term_unix.go b/vendor/github.com/chzyer/readline/term_unix.go new file mode 100644 index 00000000000..d3ea242448d --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_unix.go @@ -0,0 +1,24 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +package readline + +import ( + "syscall" + "unsafe" +) + +type Termios syscall.Termios + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (int, int, error) { + var dimensions [4]uint16 + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0) + if err != 0 { + return 0, 0, err + } + return int(dimensions[1]), int(dimensions[0]), nil +} diff --git a/vendor/github.com/chzyer/readline/term_windows.go b/vendor/github.com/chzyer/readline/term_windows.go new file mode 100644 index 00000000000..1290e00bc14 --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_windows.go @@ -0,0 +1,171 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package readline + +import ( + "io" + "syscall" + "unsafe" +) + +const ( + enableLineInput = 2 + enableEchoInput = 4 + enableProcessedInput = 1 + enableWindowInput = 8 + enableMouseInput = 16 + enableInsertMode = 32 + enableQuickEditMode = 64 + enableExtendedFlags = 128 + enableAutoPosition = 256 + enableProcessedOutput = 1 + enableWrapAtEolOutput = 2 +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") +) + +type ( + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +type State struct { + mode uint32 +} + +// IsTerminal returns true if the given file descriptor is a terminal. 
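+// On Windows this is decided by whether GetConsoleMode succeeds for the
+// given handle.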
+func IsTerminal(fd int) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func restoreTerm(fd int, state *State) error { + _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, 0, error(e) + } + return int(info.size.x), int(info.size.y), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + old := st + + st &^= (enableEchoInput) + st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) + if e != 0 { + return nil, error(e) + } + + defer func() { + syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(syscall.Handle(fd), buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + if n > 0 && buf[n-1] == '\r' { + n-- + } + ret = append(ret, buf[:n]...) 
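+		// a read shorter than the buffer means the whole line has arrived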
+ if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/github.com/chzyer/readline/terminal.go b/vendor/github.com/chzyer/readline/terminal.go new file mode 100644 index 00000000000..1078631c14a --- /dev/null +++ b/vendor/github.com/chzyer/readline/terminal.go @@ -0,0 +1,238 @@ +package readline + +import ( + "bufio" + "fmt" + "io" + "strings" + "sync" + "sync/atomic" +) + +type Terminal struct { + m sync.Mutex + cfg *Config + outchan chan rune + closed int32 + stopChan chan struct{} + kickChan chan struct{} + wg sync.WaitGroup + isReading int32 + sleeping int32 + + sizeChan chan string +} + +func NewTerminal(cfg *Config) (*Terminal, error) { + if err := cfg.Init(); err != nil { + return nil, err + } + t := &Terminal{ + cfg: cfg, + kickChan: make(chan struct{}, 1), + outchan: make(chan rune), + stopChan: make(chan struct{}, 1), + sizeChan: make(chan string, 1), + } + + go t.ioloop() + return t, nil +} + +// SleepToResume will sleep myself, and return only if I'm resumed. +func (t *Terminal) SleepToResume() { + if !atomic.CompareAndSwapInt32(&t.sleeping, 0, 1) { + return + } + defer atomic.StoreInt32(&t.sleeping, 0) + + t.ExitRawMode() + ch := WaitForResume() + SuspendMe() + <-ch + t.EnterRawMode() +} + +func (t *Terminal) EnterRawMode() (err error) { + return t.cfg.FuncMakeRaw() +} + +func (t *Terminal) ExitRawMode() (err error) { + return t.cfg.FuncExitRaw() +} + +func (t *Terminal) Write(b []byte) (int, error) { + return t.cfg.Stdout.Write(b) +} + +// WriteStdin prefill the next Stdin fetch +// Next time you call ReadLine() this value will be writen before the user input +func (t *Terminal) WriteStdin(b []byte) (int, error) { + return t.cfg.StdinWriter.Write(b) +} + +type termSize struct { + left int + top int +} + +func (t *Terminal) GetOffset(f func(offset string)) { + go func() { + f(<-t.sizeChan) + }() + t.Write([]byte("\033[6n")) +} + +func (t *Terminal) Print(s string) { + fmt.Fprintf(t.cfg.Stdout, "%s", s) +} + +func (t *Terminal) PrintRune(r rune) { + fmt.Fprintf(t.cfg.Stdout, "%c", r) +} + +func (t *Terminal) Readline() *Operation { + return NewOperation(t, t.cfg) +} + +// return rune(0) if meet EOF +func (t *Terminal) ReadRune() rune { + ch, ok := <-t.outchan + if !ok { + return rune(0) + } + return ch +} + +func (t *Terminal) IsReading() bool { + return atomic.LoadInt32(&t.isReading) == 1 +} + +func (t *Terminal) KickRead() { + select { + case t.kickChan <- struct{}{}: + default: + } +} + +func (t *Terminal) ioloop() { + t.wg.Add(1) + defer func() { + t.wg.Done() + close(t.outchan) + }() + + var ( + isEscape bool + isEscapeEx bool + expectNextChar bool + ) + + buf := bufio.NewReader(t.getStdin()) + for { + if !expectNextChar { + atomic.StoreInt32(&t.isReading, 0) + select { + case <-t.kickChan: + atomic.StoreInt32(&t.isReading, 1) + case <-t.stopChan: + return + } + } + expectNextChar = false + r, _, err := buf.ReadRune() + if err != nil { + if strings.Contains(err.Error(), "interrupted system call") { + expectNextChar = true + continue + } + break + } + + if isEscape { + isEscape = false + if r == CharEscapeEx { + expectNextChar = true + isEscapeEx = true + continue + } + r = escapeKey(r, buf) + } else if isEscapeEx { + isEscapeEx = false + if key := readEscKey(r, buf); key != nil { + r = escapeExKey(key) + // offset + if key.typ == 'R' { + if _, _, ok := key.Get2(); ok { + select { + case t.sizeChan <- key.attr: + default: + } + } + expectNextChar = true + continue + } + } + if r == 0 { + expectNextChar = true + continue + } + } + + expectNextChar = 
true + switch r { + case CharEsc: + if t.cfg.VimMode { + t.outchan <- r + break + } + isEscape = true + case CharInterrupt, CharEnter, CharCtrlJ, CharDelete: + expectNextChar = false + fallthrough + default: + t.outchan <- r + } + } + +} + +func (t *Terminal) Bell() { + fmt.Fprintf(t, "%c", CharBell) +} + +func (t *Terminal) Close() error { + if atomic.SwapInt32(&t.closed, 1) != 0 { + return nil + } + if closer, ok := t.cfg.Stdin.(io.Closer); ok { + closer.Close() + } + close(t.stopChan) + t.wg.Wait() + return t.ExitRawMode() +} + +func (t *Terminal) GetConfig() *Config { + t.m.Lock() + cfg := *t.cfg + t.m.Unlock() + return &cfg +} + +func (t *Terminal) getStdin() io.Reader { + t.m.Lock() + r := t.cfg.Stdin + t.m.Unlock() + return r +} + +func (t *Terminal) SetConfig(c *Config) error { + if err := c.Init(); err != nil { + return err + } + t.m.Lock() + t.cfg = c + t.m.Unlock() + return nil +} diff --git a/vendor/github.com/chzyer/readline/utils.go b/vendor/github.com/chzyer/readline/utils.go new file mode 100644 index 00000000000..af4e005216f --- /dev/null +++ b/vendor/github.com/chzyer/readline/utils.go @@ -0,0 +1,277 @@ +package readline + +import ( + "bufio" + "bytes" + "container/list" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + "unicode" +) + +var ( + isWindows = false +) + +const ( + CharLineStart = 1 + CharBackward = 2 + CharInterrupt = 3 + CharDelete = 4 + CharLineEnd = 5 + CharForward = 6 + CharBell = 7 + CharCtrlH = 8 + CharTab = 9 + CharCtrlJ = 10 + CharKill = 11 + CharCtrlL = 12 + CharEnter = 13 + CharNext = 14 + CharPrev = 16 + CharBckSearch = 18 + CharFwdSearch = 19 + CharTranspose = 20 + CharCtrlU = 21 + CharCtrlW = 23 + CharCtrlY = 25 + CharCtrlZ = 26 + CharEsc = 27 + CharEscapeEx = 91 + CharBackspace = 127 +) + +const ( + MetaBackward rune = -iota - 1 + MetaForward + MetaDelete + MetaBackspace + MetaTranspose +) + +// WaitForResume need to call before current process got suspend. +// It will run a ticker until a long duration is occurs, +// which means this process is resumed. 
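+// In other words, the returned channel fires once a tick is observed to
+// arrive much later than the previous one, which only happens after the
+// process was stopped and subsequently continued.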
+func WaitForResume() chan struct{} { + ch := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + go func() { + ticker := time.NewTicker(10 * time.Millisecond) + t := time.Now() + wg.Done() + for { + now := <-ticker.C + if now.Sub(t) > 100*time.Millisecond { + break + } + t = now + } + ticker.Stop() + ch <- struct{}{} + }() + wg.Wait() + return ch +} + +func Restore(fd int, state *State) error { + err := restoreTerm(fd, state) + if err != nil { + // errno 0 means everything is ok :) + if err.Error() == "errno 0" { + return nil + } else { + return err + } + } + return nil +} + +func IsPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// translate Esc[X +func escapeExKey(key *escapeKeyPair) rune { + var r rune + switch key.typ { + case 'D': + r = CharBackward + case 'C': + r = CharForward + case 'A': + r = CharPrev + case 'B': + r = CharNext + case 'H': + r = CharLineStart + case 'F': + r = CharLineEnd + case '~': + if key.attr == "3" { + r = CharDelete + } + default: + } + return r +} + +type escapeKeyPair struct { + attr string + typ rune +} + +func (e *escapeKeyPair) Get2() (int, int, bool) { + sp := strings.Split(e.attr, ";") + if len(sp) < 2 { + return -1, -1, false + } + s1, err := strconv.Atoi(sp[0]) + if err != nil { + return -1, -1, false + } + s2, err := strconv.Atoi(sp[1]) + if err != nil { + return -1, -1, false + } + return s1, s2, true +} + +func readEscKey(r rune, reader *bufio.Reader) *escapeKeyPair { + p := escapeKeyPair{} + buf := bytes.NewBuffer(nil) + for { + if r == ';' { + } else if unicode.IsNumber(r) { + } else { + p.typ = r + break + } + buf.WriteRune(r) + r, _, _ = reader.ReadRune() + } + p.attr = buf.String() + return &p +} + +// translate EscX to Meta+X +func escapeKey(r rune, reader *bufio.Reader) rune { + switch r { + case 'b': + r = MetaBackward + case 'f': + r = MetaForward + case 'd': + r = MetaDelete + case CharTranspose: + r = MetaTranspose + case CharBackspace: + r = MetaBackspace + case 'O': + d, _, _ := reader.ReadRune() + switch d { + case 'H': + r = CharLineStart + case 'F': + r = CharLineEnd + default: + reader.UnreadRune() + } + case CharEsc: + + } + return r +} + +func SplitByLine(start, screenWidth int, rs []rune) []string { + var ret []string + buf := bytes.NewBuffer(nil) + currentWidth := start + for _, r := range rs { + w := runes.Width(r) + currentWidth += w + buf.WriteRune(r) + if currentWidth >= screenWidth { + ret = append(ret, buf.String()) + buf.Reset() + currentWidth = 0 + } + } + ret = append(ret, buf.String()) + return ret +} + +// calculate how many lines for N character +func LineCount(screenWidth, w int) int { + r := w / screenWidth + if w%screenWidth != 0 { + r++ + } + return r +} + +func IsWordBreak(i rune) bool { + switch { + case i >= 'a' && i <= 'z': + case i >= 'A' && i <= 'Z': + case i >= '0' && i <= '9': + default: + return true + } + return false +} + +func GetInt(s []string, def int) int { + if len(s) == 0 { + return def + } + c, err := strconv.Atoi(s[0]) + if err != nil { + return def + } + return c +} + +type RawMode struct { + state *State +} + +func (r *RawMode) Enter() (err error) { + r.state, err = MakeRaw(GetStdin()) + return err +} + +func (r *RawMode) Exit() error { + if r.state == nil { + return nil + } + return Restore(GetStdin(), r.state) +} + +// ----------------------------------------------------------------------------- + +func sleep(n int) { + Debug(n) + time.Sleep(2000 * time.Millisecond) +} + +// print a linked list to 
Debug() +func debugList(l *list.List) { + idx := 0 + for e := l.Front(); e != nil; e = e.Next() { + Debug(idx, fmt.Sprintf("%+v", e.Value)) + idx++ + } +} + +// append log info to another file +func Debug(o ...interface{}) { + f, _ := os.OpenFile("debug.tmp", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + fmt.Fprintln(f, o...) + f.Close() +} diff --git a/vendor/github.com/chzyer/readline/utils_unix.go b/vendor/github.com/chzyer/readline/utils_unix.go new file mode 100644 index 00000000000..f88dac97bd7 --- /dev/null +++ b/vendor/github.com/chzyer/readline/utils_unix.go @@ -0,0 +1,83 @@ +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris + +package readline + +import ( + "io" + "os" + "os/signal" + "sync" + "syscall" +) + +type winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +// SuspendMe use to send suspend signal to myself, when we in the raw mode. +// For OSX it need to send to parent's pid +// For Linux it need to send to myself +func SuspendMe() { + p, _ := os.FindProcess(os.Getppid()) + p.Signal(syscall.SIGTSTP) + p, _ = os.FindProcess(os.Getpid()) + p.Signal(syscall.SIGTSTP) +} + +// get width of the terminal +func getWidth(stdoutFd int) int { + cols, _, err := GetSize(stdoutFd) + if err != nil { + return -1 + } + return cols +} + +func GetScreenWidth() int { + w := getWidth(syscall.Stdout) + if w < 0 { + w = getWidth(syscall.Stderr) + } + return w +} + +// ClearScreen clears the console screen +func ClearScreen(w io.Writer) (int, error) { + return w.Write([]byte("\033[H")) +} + +func DefaultIsTerminal() bool { + return IsTerminal(syscall.Stdin) && (IsTerminal(syscall.Stdout) || IsTerminal(syscall.Stderr)) +} + +func GetStdin() int { + return syscall.Stdin +} + +// ----------------------------------------------------------------------------- + +var ( + widthChange sync.Once + widthChangeCallback func() +) + +func DefaultOnWidthChanged(f func()) { + widthChangeCallback = f + widthChange.Do(func() { + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGWINCH) + + go func() { + for { + _, ok := <-ch + if !ok { + break + } + widthChangeCallback() + } + }() + }) +} diff --git a/vendor/github.com/chzyer/readline/utils_windows.go b/vendor/github.com/chzyer/readline/utils_windows.go new file mode 100644 index 00000000000..5bfa55dcce8 --- /dev/null +++ b/vendor/github.com/chzyer/readline/utils_windows.go @@ -0,0 +1,41 @@ +// +build windows + +package readline + +import ( + "io" + "syscall" +) + +func SuspendMe() { +} + +func GetStdin() int { + return int(syscall.Stdin) +} + +func init() { + isWindows = true +} + +// get width of the terminal +func GetScreenWidth() int { + info, _ := GetConsoleScreenBufferInfo() + if info == nil { + return -1 + } + return int(info.dwSize.x) +} + +// ClearScreen clears the console screen +func ClearScreen(_ io.Writer) error { + return SetConsoleCursorPosition(&_COORD{0, 0}) +} + +func DefaultIsTerminal() bool { + return true +} + +func DefaultOnWidthChanged(func()) { + +} diff --git a/vendor/github.com/chzyer/readline/vim.go b/vendor/github.com/chzyer/readline/vim.go new file mode 100644 index 00000000000..bedf2c1a693 --- /dev/null +++ b/vendor/github.com/chzyer/readline/vim.go @@ -0,0 +1,176 @@ +package readline + +const ( + VIM_NORMAL = iota + VIM_INSERT + VIM_VISUAL +) + +type opVim struct { + cfg *Config + op *Operation + vimMode int +} + +func newVimMode(op *Operation) *opVim { + ov := &opVim{ + cfg: op.cfg, + op: op, + } + ov.SetVimMode(ov.cfg.VimMode) + return ov +} + +func (o *opVim) 
SetVimMode(on bool) { + if o.cfg.VimMode && !on { // turn off + o.ExitVimMode() + } + o.cfg.VimMode = on + o.vimMode = VIM_INSERT +} + +func (o *opVim) ExitVimMode() { + o.vimMode = VIM_INSERT +} + +func (o *opVim) IsEnableVimMode() bool { + return o.cfg.VimMode +} + +func (o *opVim) handleVimNormalMovement(r rune, readNext func() rune) (t rune, handled bool) { + rb := o.op.buf + handled = true + switch r { + case 'h': + t = CharBackward + case 'j': + t = CharNext + case 'k': + t = CharPrev + case 'l': + t = CharForward + case '0', '^': + rb.MoveToLineStart() + case '$': + rb.MoveToLineEnd() + case 'x': + rb.Delete() + if rb.IsCursorInEnd() { + rb.MoveBackward() + } + case 'r': + rb.Replace(readNext()) + case 'd': + next := readNext() + switch next { + case 'd': + rb.Erase() + case 'w': + rb.DeleteWord() + case 'h': + rb.Backspace() + case 'l': + rb.Delete() + } + case 'p': + rb.Yank() + case 'b', 'B': + rb.MoveToPrevWord() + case 'w', 'W': + rb.MoveToNextWord() + case 'e', 'E': + rb.MoveToEndWord() + case 'f', 'F', 't', 'T': + next := readNext() + prevChar := r == 't' || r == 'T' + reverse := r == 'F' || r == 'T' + switch next { + case CharEsc: + default: + rb.MoveTo(next, prevChar, reverse) + } + default: + return r, false + } + return t, true +} + +func (o *opVim) handleVimNormalEnterInsert(r rune, readNext func() rune) (t rune, handled bool) { + rb := o.op.buf + handled = true + switch r { + case 'i': + case 'I': + rb.MoveToLineStart() + case 'a': + rb.MoveForward() + case 'A': + rb.MoveToLineEnd() + case 's': + rb.Delete() + case 'S': + rb.Erase() + case 'c': + next := readNext() + switch next { + case 'c': + rb.Erase() + case 'w': + rb.DeleteWord() + case 'h': + rb.Backspace() + case 'l': + rb.Delete() + } + default: + return r, false + } + + o.EnterVimInsertMode() + return +} + +func (o *opVim) HandleVimNormal(r rune, readNext func() rune) (t rune) { + switch r { + case CharEnter, CharInterrupt: + o.ExitVimMode() + return r + } + + if r, handled := o.handleVimNormalMovement(r, readNext); handled { + return r + } + + if r, handled := o.handleVimNormalEnterInsert(r, readNext); handled { + return r + } + + // invalid operation + o.op.t.Bell() + return 0 +} + +func (o *opVim) EnterVimInsertMode() { + o.vimMode = VIM_INSERT +} + +func (o *opVim) ExitVimInsertMode() { + o.vimMode = VIM_NORMAL +} + +func (o *opVim) HandleVim(r rune, readNext func() rune) rune { + if o.vimMode == VIM_NORMAL { + return o.HandleVimNormal(r, readNext) + } + if r == CharEsc { + o.ExitVimInsertMode() + return 0 + } + + switch o.vimMode { + case VIM_INSERT: + return r + case VIM_VISUAL: + } + return r +} diff --git a/vendor/github.com/chzyer/readline/windows_api.go b/vendor/github.com/chzyer/readline/windows_api.go new file mode 100644 index 00000000000..63f4f7b78fc --- /dev/null +++ b/vendor/github.com/chzyer/readline/windows_api.go @@ -0,0 +1,152 @@ +// +build windows + +package readline + +import ( + "reflect" + "syscall" + "unsafe" +) + +var ( + kernel = NewKernel() + stdout = uintptr(syscall.Stdout) + stdin = uintptr(syscall.Stdin) +) + +type Kernel struct { + SetConsoleCursorPosition, + SetConsoleTextAttribute, + FillConsoleOutputCharacterW, + FillConsoleOutputAttribute, + ReadConsoleInputW, + GetConsoleScreenBufferInfo, + GetConsoleCursorInfo, + GetStdHandle CallFunc +} + +type short int16 +type word uint16 +type dword uint32 +type wchar uint16 + +type _COORD struct { + x short + y short +} + +func (c *_COORD) ptr() uintptr { + return uintptr(*(*int32)(unsafe.Pointer(c))) +} + +const ( + EVENT_KEY = 0x0001 
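+	// (the EVENT_* values mirror the Win32 INPUT_RECORD EventType flags)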
+ EVENT_MOUSE = 0x0002 + EVENT_WINDOW_BUFFER_SIZE = 0x0004 + EVENT_MENU = 0x0008 + EVENT_FOCUS = 0x0010 +) + +type _KEY_EVENT_RECORD struct { + bKeyDown int32 + wRepeatCount word + wVirtualKeyCode word + wVirtualScanCode word + unicodeChar wchar + dwControlKeyState dword +} + +// KEY_EVENT_RECORD KeyEvent; +// MOUSE_EVENT_RECORD MouseEvent; +// WINDOW_BUFFER_SIZE_RECORD WindowBufferSizeEvent; +// MENU_EVENT_RECORD MenuEvent; +// FOCUS_EVENT_RECORD FocusEvent; +type _INPUT_RECORD struct { + EventType word + Padding uint16 + Event [16]byte +} + +type _CONSOLE_SCREEN_BUFFER_INFO struct { + dwSize _COORD + dwCursorPosition _COORD + wAttributes word + srWindow _SMALL_RECT + dwMaximumWindowSize _COORD +} + +type _SMALL_RECT struct { + left short + top short + right short + bottom short +} + +type _CONSOLE_CURSOR_INFO struct { + dwSize dword + bVisible bool +} + +type CallFunc func(u ...uintptr) error + +func NewKernel() *Kernel { + k := &Kernel{} + kernel32 := syscall.NewLazyDLL("kernel32.dll") + v := reflect.ValueOf(k).Elem() + t := v.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + f := kernel32.NewProc(name) + v.Field(i).Set(reflect.ValueOf(k.Wrap(f))) + } + return k +} + +func (k *Kernel) Wrap(p *syscall.LazyProc) CallFunc { + return func(args ...uintptr) error { + var r0 uintptr + var e1 syscall.Errno + size := uintptr(len(args)) + if len(args) <= 3 { + buf := make([]uintptr, 3) + copy(buf, args) + r0, _, e1 = syscall.Syscall(p.Addr(), size, + buf[0], buf[1], buf[2]) + } else { + buf := make([]uintptr, 6) + copy(buf, args) + r0, _, e1 = syscall.Syscall6(p.Addr(), size, + buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], + ) + } + + if int(r0) == 0 { + if e1 != 0 { + return error(e1) + } else { + return syscall.EINVAL + } + } + return nil + } + +} + +func GetConsoleScreenBufferInfo() (*_CONSOLE_SCREEN_BUFFER_INFO, error) { + t := new(_CONSOLE_SCREEN_BUFFER_INFO) + err := kernel.GetConsoleScreenBufferInfo( + stdout, + uintptr(unsafe.Pointer(t)), + ) + return t, err +} + +func GetConsoleCursorInfo() (*_CONSOLE_CURSOR_INFO, error) { + t := new(_CONSOLE_CURSOR_INFO) + err := kernel.GetConsoleCursorInfo(stdout, uintptr(unsafe.Pointer(t))) + return t, err +} + +func SetConsoleCursorPosition(c *_COORD) error { + return kernel.SetConsoleCursorPosition(stdout, c.ptr()) +} diff --git a/vendor/github.com/containerd/cgroups/LICENSE b/vendor/github.com/containerd/cgroups/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/cgroups/stats/v1/doc.go b/vendor/github.com/containerd/cgroups/stats/v1/doc.go new file mode 100644 index 00000000000..23f3cdd4b37 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/stats/v1/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package v1 diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go new file mode 100644 index 00000000000..6d2d41770b9 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go @@ -0,0 +1,6125 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/containerd/cgroups/stats/v1/metrics.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Metrics struct { + Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"` + Pids *PidsStat `protobuf:"bytes,2,opt,name=pids,proto3" json:"pids,omitempty"` + CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` + Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"` + Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"` + Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"` + Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"` + CgroupStats *CgroupStats `protobuf:"bytes,8,opt,name=cgroup_stats,json=cgroupStats,proto3" json:"cgroup_stats,omitempty"` + MemoryOomControl *MemoryOomControl `protobuf:"bytes,9,opt,name=memory_oom_control,json=memoryOomControl,proto3" json:"memory_oom_control,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metrics) Reset() { *m = Metrics{} } +func (*Metrics) ProtoMessage() {} +func (*Metrics) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{0} +} +func (m *Metrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metrics.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metrics.Merge(m, src) +} +func (m *Metrics) XXX_Size() int { + return m.Size() +} +func (m *Metrics) XXX_DiscardUnknown() { + xxx_messageInfo_Metrics.DiscardUnknown(m) +} + +var xxx_messageInfo_Metrics proto.InternalMessageInfo + +type HugetlbStat struct { + Usage 
uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"` + Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HugetlbStat) Reset() { *m = HugetlbStat{} } +func (*HugetlbStat) ProtoMessage() {} +func (*HugetlbStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{1} +} +func (m *HugetlbStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HugetlbStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HugetlbStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HugetlbStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_HugetlbStat.Merge(m, src) +} +func (m *HugetlbStat) XXX_Size() int { + return m.Size() +} +func (m *HugetlbStat) XXX_DiscardUnknown() { + xxx_messageInfo_HugetlbStat.DiscardUnknown(m) +} + +var xxx_messageInfo_HugetlbStat proto.InternalMessageInfo + +type PidsStat struct { + Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"` + Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PidsStat) Reset() { *m = PidsStat{} } +func (*PidsStat) ProtoMessage() {} +func (*PidsStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{2} +} +func (m *PidsStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PidsStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PidsStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PidsStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_PidsStat.Merge(m, src) +} +func (m *PidsStat) XXX_Size() int { + return m.Size() +} +func (m *PidsStat) XXX_DiscardUnknown() { + xxx_messageInfo_PidsStat.DiscardUnknown(m) +} + +var xxx_messageInfo_PidsStat proto.InternalMessageInfo + +type CPUStat struct { + Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage,proto3" json:"usage,omitempty"` + Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling,proto3" json:"throttling,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CPUStat) Reset() { *m = CPUStat{} } +func (*CPUStat) ProtoMessage() {} +func (*CPUStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{3} +} +func (m *CPUStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CPUStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CPUStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CPUStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_CPUStat.Merge(m, src) +} +func (m *CPUStat) XXX_Size() int { + return 
m.Size() +} +func (m *CPUStat) XXX_DiscardUnknown() { + xxx_messageInfo_CPUStat.DiscardUnknown(m) +} + +var xxx_messageInfo_CPUStat proto.InternalMessageInfo + +type CPUUsage struct { + // values in nanoseconds + Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"` + User uint64 `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"` + PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu,proto3" json:"per_cpu,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CPUUsage) Reset() { *m = CPUUsage{} } +func (*CPUUsage) ProtoMessage() {} +func (*CPUUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{4} +} +func (m *CPUUsage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CPUUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CPUUsage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CPUUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_CPUUsage.Merge(m, src) +} +func (m *CPUUsage) XXX_Size() int { + return m.Size() +} +func (m *CPUUsage) XXX_DiscardUnknown() { + xxx_messageInfo_CPUUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_CPUUsage proto.InternalMessageInfo + +type Throttle struct { + Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Throttle) Reset() { *m = Throttle{} } +func (*Throttle) ProtoMessage() {} +func (*Throttle) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{5} +} +func (m *Throttle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Throttle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Throttle.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Throttle) XXX_Merge(src proto.Message) { + xxx_messageInfo_Throttle.Merge(m, src) +} +func (m *Throttle) XXX_Size() int { + return m.Size() +} +func (m *Throttle) XXX_DiscardUnknown() { + xxx_messageInfo_Throttle.DiscardUnknown(m) +} + +var xxx_messageInfo_Throttle proto.InternalMessageInfo + +type MemoryStat struct { + Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"` + RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"` + RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"` + MappedFile uint64 `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"` + Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"` + Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"` + PgPgIn uint64 
`protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"` + PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"` + PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"` + PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"` + InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"` + ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"` + InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"` + ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"` + Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"` + HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"` + HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"` + TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"` + TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"` + TotalRSSHuge uint64 `protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"` + TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"` + TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"` + TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"` + TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"` + TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"` + TotalPgFault uint64 `protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"` + TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"` + TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"` + TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"` + TotalInactiveFile uint64 `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"` + TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"` + TotalUnevictable uint64 `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"` + Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage,proto3" json:"usage,omitempty"` + Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap,proto3" json:"swap,omitempty"` + Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel,proto3" json:"kernel,omitempty"` + 
KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp,proto3" json:"kernel_tcp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryStat) Reset() { *m = MemoryStat{} } +func (*MemoryStat) ProtoMessage() {} +func (*MemoryStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{6} +} +func (m *MemoryStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryStat.Merge(m, src) +} +func (m *MemoryStat) XXX_Size() int { + return m.Size() +} +func (m *MemoryStat) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryStat.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryStat proto.InternalMessageInfo + +type MemoryEntry struct { + Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` + Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryEntry) Reset() { *m = MemoryEntry{} } +func (*MemoryEntry) ProtoMessage() {} +func (*MemoryEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{7} +} +func (m *MemoryEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryEntry.Merge(m, src) +} +func (m *MemoryEntry) XXX_Size() int { + return m.Size() +} +func (m *MemoryEntry) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryEntry proto.InternalMessageInfo + +type MemoryOomControl struct { + OomKillDisable uint64 `protobuf:"varint,1,opt,name=oom_kill_disable,json=oomKillDisable,proto3" json:"oom_kill_disable,omitempty"` + UnderOom uint64 `protobuf:"varint,2,opt,name=under_oom,json=underOom,proto3" json:"under_oom,omitempty"` + OomKill uint64 `protobuf:"varint,3,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryOomControl) Reset() { *m = MemoryOomControl{} } +func (*MemoryOomControl) ProtoMessage() {} +func (*MemoryOomControl) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{8} +} +func (m *MemoryOomControl) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryOomControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryOomControl.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryOomControl) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryOomControl.Merge(m, src) +} +func (m *MemoryOomControl) XXX_Size() int { + return m.Size() +} +func (m *MemoryOomControl) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryOomControl.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryOomControl proto.InternalMessageInfo + +type BlkIOStat struct { + IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive,proto3" json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive,proto3" json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive,proto3" json:"io_queued_recursive,omitempty"` + IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive,proto3" json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive,proto3" json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive,proto3" json:"io_merged_recursive,omitempty"` + IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive,proto3" json:"io_time_recursive,omitempty"` + SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive,proto3" json:"sectors_recursive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlkIOStat) Reset() { *m = BlkIOStat{} } +func (*BlkIOStat) ProtoMessage() {} +func (*BlkIOStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{9} +} +func (m *BlkIOStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlkIOStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlkIOStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlkIOStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlkIOStat.Merge(m, src) +} +func (m *BlkIOStat) XXX_Size() int { + return m.Size() +} +func (m *BlkIOStat) XXX_DiscardUnknown() { + xxx_messageInfo_BlkIOStat.DiscardUnknown(m) +} + +var xxx_messageInfo_BlkIOStat proto.InternalMessageInfo + +type BlkIOEntry struct { + Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"` + Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"` + Major uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"` + Minor uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"` + Value uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlkIOEntry) Reset() { *m = BlkIOEntry{} } +func (*BlkIOEntry) ProtoMessage() {} +func (*BlkIOEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{10} +} +func (m *BlkIOEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlkIOEntry) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlkIOEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlkIOEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlkIOEntry.Merge(m, src) +} +func (m *BlkIOEntry) XXX_Size() int { + return m.Size() +} +func (m *BlkIOEntry) XXX_DiscardUnknown() { + xxx_messageInfo_BlkIOEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_BlkIOEntry proto.InternalMessageInfo + +type RdmaStat struct { + Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current,proto3" json:"current,omitempty"` + Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit,proto3" json:"limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RdmaStat) Reset() { *m = RdmaStat{} } +func (*RdmaStat) ProtoMessage() {} +func (*RdmaStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{11} +} +func (m *RdmaStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RdmaStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RdmaStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RdmaStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_RdmaStat.Merge(m, src) +} +func (m *RdmaStat) XXX_Size() int { + return m.Size() +} +func (m *RdmaStat) XXX_DiscardUnknown() { + xxx_messageInfo_RdmaStat.DiscardUnknown(m) +} + +var xxx_messageInfo_RdmaStat proto.InternalMessageInfo + +type RdmaEntry struct { + Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` + HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"` + HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RdmaEntry) Reset() { *m = RdmaEntry{} } +func (*RdmaEntry) ProtoMessage() {} +func (*RdmaEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{12} +} +func (m *RdmaEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RdmaEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RdmaEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RdmaEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_RdmaEntry.Merge(m, src) +} +func (m *RdmaEntry) XXX_Size() int { + return m.Size() +} +func (m *RdmaEntry) XXX_DiscardUnknown() { + xxx_messageInfo_RdmaEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_RdmaEntry proto.InternalMessageInfo + +type NetworkStat struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` + RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"` + RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"` + 
RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"` + TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` + TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"` + TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"` + TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkStat) Reset() { *m = NetworkStat{} } +func (*NetworkStat) ProtoMessage() {} +func (*NetworkStat) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{13} +} +func (m *NetworkStat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NetworkStat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NetworkStat) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkStat.Merge(m, src) +} +func (m *NetworkStat) XXX_Size() int { + return m.Size() +} +func (m *NetworkStat) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkStat.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkStat proto.InternalMessageInfo + +// CgroupStats exports per-cgroup statistics. +type CgroupStats struct { + // number of tasks sleeping + NrSleeping uint64 `protobuf:"varint,1,opt,name=nr_sleeping,json=nrSleeping,proto3" json:"nr_sleeping,omitempty"` + // number of tasks running + NrRunning uint64 `protobuf:"varint,2,opt,name=nr_running,json=nrRunning,proto3" json:"nr_running,omitempty"` + // number of tasks in stopped state + NrStopped uint64 `protobuf:"varint,3,opt,name=nr_stopped,json=nrStopped,proto3" json:"nr_stopped,omitempty"` + // number of tasks in uninterruptible state + NrUninterruptible uint64 `protobuf:"varint,4,opt,name=nr_uninterruptible,json=nrUninterruptible,proto3" json:"nr_uninterruptible,omitempty"` + // number of tasks waiting on IO + NrIoWait uint64 `protobuf:"varint,5,opt,name=nr_io_wait,json=nrIoWait,proto3" json:"nr_io_wait,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CgroupStats) Reset() { *m = CgroupStats{} } +func (*CgroupStats) ProtoMessage() {} +func (*CgroupStats) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{14} +} +func (m *CgroupStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CgroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CgroupStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CgroupStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_CgroupStats.Merge(m, src) +} +func (m *CgroupStats) XXX_Size() int { + return m.Size() +} +func (m *CgroupStats) XXX_DiscardUnknown() { + xxx_messageInfo_CgroupStats.DiscardUnknown(m) +} + +var xxx_messageInfo_CgroupStats proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics") + 
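	// RegisterType binds each generated Go type to its fully-qualified
	// proto name so the gogo runtime can look it up by string (for
	// example, to resolve a google.protobuf.Any payload); the second
	// init below registers the gzipped FileDescriptorProto carrying the
	// original .proto schema. A consumer normally just unmarshals a raw
	// stats payload with this generated code; a minimal sketch, assuming
	// `raw` holds an encoded Metrics message:
	//
	//	var m Metrics
	//	if err := proto.Unmarshal(raw, &m); err != nil {
	//		return err
	//	}
	//	if m.CPU != nil && m.CPU.Usage != nil {
	//		total := m.CPU.Usage.Total // nanoseconds
	//		_ = total
	//	}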
proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat") + proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v1.PidsStat") + proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v1.CPUStat") + proto.RegisterType((*CPUUsage)(nil), "io.containerd.cgroups.v1.CPUUsage") + proto.RegisterType((*Throttle)(nil), "io.containerd.cgroups.v1.Throttle") + proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v1.MemoryStat") + proto.RegisterType((*MemoryEntry)(nil), "io.containerd.cgroups.v1.MemoryEntry") + proto.RegisterType((*MemoryOomControl)(nil), "io.containerd.cgroups.v1.MemoryOomControl") + proto.RegisterType((*BlkIOStat)(nil), "io.containerd.cgroups.v1.BlkIOStat") + proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry") + proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat") + proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry") + proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat") + proto.RegisterType((*CgroupStats)(nil), "io.containerd.cgroups.v1.CgroupStats") +} + +func init() { + proto.RegisterFile("github.com/containerd/cgroups/stats/v1/metrics.proto", fileDescriptor_a17b2d87c332bfaa) +} + +var fileDescriptor_a17b2d87c332bfaa = []byte{ + // 1749 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0xcd, 0x72, 0xe3, 0xc6, + 0x11, 0x36, 0x45, 0x48, 0x24, 0x9a, 0x92, 0x56, 0x9a, 0xfd, 0x83, 0xe4, 0xb5, 0x28, 0x53, 0xbb, + 0x89, 0xe2, 0xad, 0x48, 0x65, 0x27, 0xb5, 0x95, 0x75, 0xec, 0x4a, 0x59, 0x5a, 0xbb, 0x76, 0xcb, + 0x51, 0x44, 0x83, 0x52, 0xd9, 0x39, 0xa1, 0x40, 0x70, 0x16, 0x9c, 0x15, 0x80, 0x81, 0x07, 0x03, + 0x89, 0xca, 0x29, 0x87, 0x54, 0xe5, 0x94, 0x07, 0xca, 0x1b, 0xf8, 0x98, 0x4b, 0x52, 0xc9, 0x45, + 0x15, 0xf3, 0x49, 0x52, 0x33, 0x3d, 0xf8, 0xa1, 0xbc, 0x5a, 0x85, 0x37, 0x76, 0xcf, 0xd7, 0x5f, + 0xf7, 0x34, 0xbe, 0x19, 0x34, 0x08, 0xbf, 0x0e, 0x99, 0x1c, 0xe7, 0xc3, 0xbd, 0x80, 0xc7, 0xfb, + 0x01, 0x4f, 0xa4, 0xcf, 0x12, 0x2a, 0x46, 0xfb, 0x41, 0x28, 0x78, 0x9e, 0x66, 0xfb, 0x99, 0xf4, + 0x65, 0xb6, 0x7f, 0xfe, 0xf1, 0x7e, 0x4c, 0xa5, 0x60, 0x41, 0xb6, 0x97, 0x0a, 0x2e, 0x39, 0x71, + 0x18, 0xdf, 0xab, 0xd0, 0x7b, 0x06, 0xbd, 0x77, 0xfe, 0xf1, 0xe6, 0xbd, 0x90, 0x87, 0x5c, 0x83, + 0xf6, 0xd5, 0x2f, 0xc4, 0xf7, 0xfe, 0x65, 0x41, 0xeb, 0x08, 0x19, 0xc8, 0xef, 0xa0, 0x35, 0xce, + 0x43, 0x2a, 0xa3, 0xa1, 0xd3, 0xd8, 0x6e, 0xee, 0x76, 0x3e, 0x79, 0xb2, 0x77, 0x13, 0xdb, 0xde, + 0x4b, 0x04, 0x0e, 0xa4, 0x2f, 0xdd, 0x22, 0x8a, 0x3c, 0x03, 0x2b, 0x65, 0xa3, 0xcc, 0x59, 0xd8, + 0x6e, 0xec, 0x76, 0x3e, 0xe9, 0xdd, 0x1c, 0xdd, 0x67, 0xa3, 0x4c, 0x87, 0x6a, 0x3c, 0xf9, 0x0c, + 0x9a, 0x41, 0x9a, 0x3b, 0x4d, 0x1d, 0xf6, 0xe1, 0xcd, 0x61, 0x87, 0xfd, 0x53, 0x15, 0x75, 0xd0, + 0x9a, 0x5e, 0x75, 0x9b, 0x87, 0xfd, 0x53, 0x57, 0x85, 0x91, 0xcf, 0x60, 0x29, 0xa6, 0x31, 0x17, + 0x97, 0x8e, 0xa5, 0x09, 0x1e, 0xdf, 0x4c, 0x70, 0xa4, 0x71, 0x3a, 0xb3, 0x89, 0x21, 0xcf, 0x61, + 0x71, 0x18, 0x9d, 0x31, 0xee, 0x2c, 0xea, 0xe0, 0x9d, 0x9b, 0x83, 0x0f, 0xa2, 0xb3, 0x57, 0xc7, + 0x3a, 0x16, 0x23, 0xd4, 0x76, 0xc5, 0x28, 0xf6, 0x9d, 0xa5, 0xdb, 0xb6, 0xeb, 0x8e, 0x62, 0x1f, + 0xb7, 0xab, 0xf0, 0xaa, 0xcf, 0x09, 0x95, 0x17, 0x5c, 0x9c, 0x39, 0xad, 0xdb, 0xfa, 0xfc, 0x07, + 0x04, 0x62, 0x9f, 0x4d, 0x14, 0x79, 0x09, 0xcb, 0x08, 0xf1, 0xb4, 0x0a, 0x9c, 0xb6, 0x2e, 0xe0, + 0x1d, 0x2c, 0x87, 0xfa, 0xa7, 0x22, 0xc9, 0xdc, 0x4e, 0x50, 0x19, 0xe4, 0x3b, 0x20, 0xd8, 0x07, + 0x8f, 0xf3, 0xd8, 0x53, 0xc1, 0x82, 0x47, 0x8e, 0xad, 
0xf9, 0x3e, 0xba, 0xad, 0x8f, 0xc7, 0x3c, + 0x3e, 0xc4, 0x08, 0x77, 0x2d, 0xbe, 0xe6, 0xe9, 0x9d, 0x41, 0xa7, 0xa6, 0x11, 0x72, 0x0f, 0x16, + 0xf3, 0xcc, 0x0f, 0xa9, 0xd3, 0xd8, 0x6e, 0xec, 0x5a, 0x2e, 0x1a, 0x64, 0x0d, 0x9a, 0xb1, 0x3f, + 0xd1, 0x7a, 0xb1, 0x5c, 0xf5, 0x93, 0x38, 0xd0, 0x7a, 0xed, 0xb3, 0x28, 0x48, 0xa4, 0x96, 0x83, + 0xe5, 0x16, 0x26, 0xd9, 0x84, 0x76, 0xea, 0x87, 0x34, 0x63, 0x7f, 0xa2, 0xfa, 0x41, 0xdb, 0x6e, + 0x69, 0xf7, 0x3e, 0x85, 0x76, 0x21, 0x29, 0xc5, 0x10, 0xe4, 0x42, 0xd0, 0x44, 0x9a, 0x5c, 0x85, + 0xa9, 0x6a, 0x88, 0x58, 0xcc, 0xa4, 0xc9, 0x87, 0x46, 0xef, 0xaf, 0x0d, 0x68, 0x19, 0x61, 0x91, + 0xdf, 0xd4, 0xab, 0x7c, 0xe7, 0x23, 0x3d, 0xec, 0x9f, 0x9e, 0x2a, 0x64, 0xb1, 0x93, 0x03, 0x00, + 0x39, 0x16, 0x5c, 0xca, 0x88, 0x25, 0xe1, 0xed, 0x07, 0xe0, 0x04, 0xb1, 0xd4, 0xad, 0x45, 0xf5, + 0xbe, 0x87, 0x76, 0x41, 0xab, 0x6a, 0x95, 0x5c, 0xfa, 0x51, 0xd1, 0x2f, 0x6d, 0x90, 0x07, 0xb0, + 0x74, 0x46, 0x45, 0x42, 0x23, 0xb3, 0x05, 0x63, 0x11, 0x02, 0x56, 0x9e, 0x51, 0x61, 0x5a, 0xa6, + 0x7f, 0x93, 0x1d, 0x68, 0xa5, 0x54, 0x78, 0xea, 0x60, 0x59, 0xdb, 0xcd, 0x5d, 0xeb, 0x00, 0xa6, + 0x57, 0xdd, 0xa5, 0x3e, 0x15, 0xea, 0xe0, 0x2c, 0xa5, 0x54, 0x1c, 0xa6, 0x79, 0x6f, 0x02, 0xed, + 0xa2, 0x14, 0xd5, 0xb8, 0x94, 0x0a, 0xc6, 0x47, 0x59, 0xd1, 0x38, 0x63, 0x92, 0xa7, 0xb0, 0x6e, + 0xca, 0xa4, 0x23, 0xaf, 0xc0, 0x60, 0x05, 0x6b, 0xe5, 0x42, 0xdf, 0x80, 0x9f, 0xc0, 0x6a, 0x05, + 0x96, 0x2c, 0xa6, 0xa6, 0xaa, 0x95, 0xd2, 0x7b, 0xc2, 0x62, 0xda, 0xfb, 0x4f, 0x07, 0xa0, 0x3a, + 0x8e, 0x6a, 0xbf, 0x81, 0x1f, 0x8c, 0x4b, 0x7d, 0x68, 0x83, 0x6c, 0x40, 0x53, 0x64, 0x26, 0x15, + 0x9e, 0x7a, 0x77, 0x30, 0x70, 0x95, 0x8f, 0xfc, 0x0c, 0xda, 0x22, 0xcb, 0x3c, 0x75, 0xf5, 0x60, + 0x82, 0x83, 0xce, 0xf4, 0xaa, 0xdb, 0x72, 0x07, 0x03, 0x25, 0x3b, 0xb7, 0x25, 0xb2, 0x4c, 0xfd, + 0x20, 0x5d, 0xe8, 0xc4, 0x7e, 0x9a, 0xd2, 0x91, 0xf7, 0x9a, 0x45, 0xa8, 0x1c, 0xcb, 0x05, 0x74, + 0x7d, 0xc5, 0x22, 0xdd, 0xe9, 0x11, 0x13, 0xf2, 0x52, 0x5f, 0x00, 0x96, 0x8b, 0x06, 0x79, 0x04, + 0xf6, 0x85, 0x60, 0x92, 0x0e, 0xfd, 0xe0, 0x4c, 0x1f, 0x70, 0xcb, 0xad, 0x1c, 0xc4, 0x81, 0x76, + 0x1a, 0x7a, 0x69, 0xe8, 0xb1, 0xc4, 0x69, 0xe1, 0x93, 0x48, 0xc3, 0x7e, 0xf8, 0x2a, 0x21, 0x9b, + 0x60, 0xe3, 0x0a, 0xcf, 0xa5, 0x3e, 0x97, 0xaa, 0x8d, 0x61, 0x3f, 0x3c, 0xce, 0x25, 0xd9, 0xd0, + 0x51, 0xaf, 0xfd, 0x3c, 0x92, 0xfa, 0x88, 0xe9, 0xa5, 0xaf, 0x94, 0x49, 0xb6, 0x61, 0x39, 0x0d, + 0xbd, 0xd8, 0x7f, 0x63, 0x96, 0x01, 0xcb, 0x4c, 0xc3, 0x23, 0xff, 0x0d, 0x22, 0x76, 0x60, 0x85, + 0x25, 0x7e, 0x20, 0xd9, 0x39, 0xf5, 0xfc, 0x84, 0x27, 0x4e, 0x47, 0x43, 0x96, 0x0b, 0xe7, 0x17, + 0x09, 0x4f, 0xd4, 0x66, 0xeb, 0x90, 0x65, 0x64, 0xa9, 0x01, 0xea, 0x2c, 0xba, 0x1f, 0x2b, 0xb3, + 0x2c, 0xba, 0x23, 0x15, 0x8b, 0x86, 0xac, 0xd6, 0x59, 0x34, 0x60, 0x1b, 0x3a, 0x79, 0x42, 0xcf, + 0x59, 0x20, 0xfd, 0x61, 0x44, 0x9d, 0x3b, 0x1a, 0x50, 0x77, 0x91, 0x4f, 0x61, 0x63, 0xcc, 0xa8, + 0xf0, 0x45, 0x30, 0x66, 0x81, 0x1f, 0x79, 0xe6, 0x92, 0xc1, 0xe3, 0xb7, 0xa6, 0xf1, 0x0f, 0xeb, + 0x00, 0x54, 0xc2, 0xef, 0xd5, 0x32, 0x79, 0x06, 0x33, 0x4b, 0x5e, 0x76, 0xe1, 0xa7, 0x26, 0x72, + 0x5d, 0x47, 0xde, 0xaf, 0x2f, 0x0f, 0x2e, 0xfc, 0x14, 0xe3, 0xba, 0xd0, 0xd1, 0xa7, 0xc4, 0x43, + 0x21, 0x11, 0x2c, 0x5b, 0xbb, 0x0e, 0xb5, 0x9a, 0x7e, 0x01, 0x36, 0x02, 0x94, 0xa6, 0xee, 0x6a, + 0xcd, 0x2c, 0x4f, 0xaf, 0xba, 0xed, 0x13, 0xe5, 0x54, 0xc2, 0x6a, 0xeb, 0x65, 0x37, 0xcb, 0xc8, + 0x33, 0x58, 0x2d, 0xa1, 0xa8, 0xb1, 0x7b, 0x1a, 0xbf, 0x36, 0xbd, 0xea, 0x2e, 0x17, 0x78, 0x2d, + 0xb4, 0xe5, 0x22, 0x46, 0xab, 0xed, 0x23, 0x58, 0xc7, 0xb8, 0xba, 0xe6, 0xee, 
0xeb, 0x4a, 0xee, + 0xe8, 0x85, 0xa3, 0x4a, 0x78, 0x65, 0xbd, 0x28, 0xbf, 0x07, 0xb5, 0x7a, 0x5f, 0x68, 0x0d, 0xfe, + 0x1c, 0x30, 0xc6, 0xab, 0x94, 0xf8, 0x50, 0x83, 0xb0, 0xb6, 0x6f, 0x4b, 0x39, 0xee, 0x14, 0xd5, + 0x96, 0xa2, 0x74, 0xf0, 0x91, 0x68, 0x6f, 0x1f, 0x95, 0xf9, 0xa4, 0x60, 0xab, 0xf4, 0xb9, 0x81, + 0x0f, 0xbf, 0x44, 0x29, 0x91, 0x3e, 0xae, 0x71, 0xa1, 0x16, 0x37, 0x67, 0x50, 0xa8, 0xc6, 0xa7, + 0x40, 0x4a, 0x54, 0xa5, 0xda, 0xf7, 0x6b, 0x1b, 0xed, 0x57, 0xd2, 0xdd, 0x83, 0xbb, 0x08, 0x9e, + 0x15, 0xf0, 0x23, 0x8d, 0xc6, 0x7e, 0xbd, 0xaa, 0xab, 0xb8, 0x6c, 0x62, 0x1d, 0xfd, 0x41, 0x8d, + 0xfb, 0x8b, 0x0a, 0xfb, 0x53, 0x6e, 0xdd, 0xf2, 0xad, 0xb7, 0x70, 0xeb, 0xa6, 0x5f, 0xe7, 0xd6, + 0xe8, 0xee, 0x4f, 0xb8, 0x35, 0xf6, 0x69, 0x81, 0xad, 0x8b, 0x7d, 0xdb, 0x5c, 0x7b, 0x6a, 0xe1, + 0xb4, 0xa6, 0xf8, 0xdf, 0x16, 0xaf, 0x8e, 0x0f, 0x6f, 0x7b, 0x19, 0xa3, 0xd6, 0xbf, 0x4c, 0xa4, + 0xb8, 0x2c, 0xde, 0x1e, 0xcf, 0xc1, 0x52, 0x2a, 0x77, 0x7a, 0xf3, 0xc4, 0xea, 0x10, 0xf2, 0x79, + 0xf9, 0x4a, 0xd8, 0x99, 0x27, 0xb8, 0x78, 0x73, 0x0c, 0x00, 0xf0, 0x97, 0x27, 0x83, 0xd4, 0x79, + 0x3c, 0x07, 0xc5, 0xc1, 0xca, 0xf4, 0xaa, 0x6b, 0x7f, 0xad, 0x83, 0x4f, 0x0e, 0xfb, 0xae, 0x8d, + 0x3c, 0x27, 0x41, 0xda, 0xa3, 0xd0, 0xa9, 0x01, 0xab, 0xf7, 0x6e, 0xa3, 0xf6, 0xde, 0xad, 0x26, + 0x82, 0x85, 0xb7, 0x4c, 0x04, 0xcd, 0xb7, 0x4e, 0x04, 0xd6, 0xcc, 0x44, 0xd0, 0x93, 0xb0, 0x76, + 0x7d, 0x10, 0x21, 0xbb, 0xb0, 0xa6, 0x26, 0x99, 0x33, 0x16, 0xa9, 0x73, 0x95, 0xe9, 0x47, 0x86, + 0x69, 0x57, 0x39, 0x8f, 0xbf, 0x66, 0x51, 0xf4, 0x02, 0xbd, 0xe4, 0x7d, 0xb0, 0xf3, 0x64, 0x44, + 0x85, 0x9a, 0x7c, 0x4c, 0x0d, 0x6d, 0xed, 0x38, 0xe6, 0xb1, 0xba, 0xaa, 0x0b, 0x9a, 0x62, 0x0e, + 0x31, 0xe1, 0xbd, 0x7f, 0x2e, 0x82, 0x5d, 0x8e, 0x82, 0xc4, 0x87, 0x4d, 0xc6, 0xbd, 0x8c, 0x8a, + 0x73, 0x16, 0x50, 0x6f, 0x78, 0x29, 0x69, 0xe6, 0x09, 0x1a, 0xe4, 0x22, 0x63, 0xe7, 0xd4, 0x8c, + 0xd1, 0x8f, 0x6f, 0x99, 0x29, 0xf1, 0x89, 0x3c, 0x64, 0x7c, 0x80, 0x34, 0x07, 0x8a, 0xc5, 0x2d, + 0x48, 0xc8, 0x77, 0x70, 0xbf, 0x4a, 0x31, 0xaa, 0xb1, 0x2f, 0xcc, 0xc1, 0x7e, 0xb7, 0x64, 0x1f, + 0x55, 0xcc, 0x27, 0x70, 0x97, 0x71, 0xef, 0xfb, 0x9c, 0xe6, 0x33, 0xbc, 0xcd, 0x39, 0x78, 0xd7, + 0x19, 0xff, 0x46, 0xc7, 0x57, 0xac, 0x1e, 0x6c, 0xd4, 0x5a, 0xa2, 0x26, 0x80, 0x1a, 0xb7, 0x35, + 0x07, 0xf7, 0x83, 0xb2, 0x66, 0x35, 0x31, 0x54, 0x09, 0xfe, 0x08, 0x0f, 0x18, 0xf7, 0x2e, 0x7c, + 0x26, 0xaf, 0xb3, 0x2f, 0xce, 0xd7, 0x91, 0x6f, 0x7d, 0x26, 0x67, 0xa9, 0xb1, 0x23, 0x31, 0x15, + 0xe1, 0x4c, 0x47, 0x96, 0xe6, 0xeb, 0xc8, 0x91, 0x8e, 0xaf, 0x58, 0xfb, 0xb0, 0xce, 0xf8, 0xf5, + 0x5a, 0x5b, 0x73, 0x70, 0xde, 0x61, 0x7c, 0xb6, 0xce, 0x6f, 0x60, 0x3d, 0xa3, 0x81, 0xe4, 0xa2, + 0xae, 0xb6, 0xf6, 0x1c, 0x8c, 0x6b, 0x26, 0xbc, 0xa4, 0xec, 0x9d, 0x03, 0x54, 0xeb, 0x64, 0x15, + 0x16, 0x78, 0xaa, 0x4f, 0x8e, 0xed, 0x2e, 0xf0, 0x54, 0x4d, 0x9e, 0x23, 0x75, 0xd9, 0xe1, 0x71, + 0xb5, 0x5d, 0x63, 0xa9, 0x53, 0x1c, 0xfb, 0x6f, 0x78, 0x31, 0x7a, 0xa2, 0xa1, 0xbd, 0x2c, 0xe1, + 0xc2, 0x9c, 0x58, 0x34, 0x94, 0xf7, 0xdc, 0x8f, 0x72, 0x5a, 0x4c, 0x5a, 0xda, 0xe8, 0xfd, 0xa5, + 0x01, 0xed, 0xe2, 0x03, 0x89, 0x7c, 0x5e, 0x1f, 0xde, 0x9b, 0xef, 0xfe, 0x1e, 0x53, 0x41, 0xb8, + 0x99, 0x72, 0xc2, 0x7f, 0x5e, 0x4d, 0xf8, 0xff, 0x77, 0xb0, 0xf9, 0x0c, 0xa0, 0x60, 0x97, 0xbe, + 0xda, 0x6e, 0x1b, 0x33, 0xbb, 0xed, 0x42, 0x67, 0x1c, 0xf8, 0xde, 0xd8, 0x4f, 0x46, 0x11, 0xc5, + 0xb9, 0x74, 0xc5, 0x85, 0x71, 0xe0, 0xbf, 0x44, 0x4f, 0x01, 0xe0, 0xc3, 0x37, 0x34, 0x90, 0x99, + 0x6e, 0x0a, 0x02, 0x8e, 0xd1, 0xd3, 0xfb, 0xdb, 0x02, 0x74, 0x6a, 0xdf, 0x74, 0x6a, 0x72, 0x4f, + 0xfc, 
0xb8, 0xc8, 0xa3, 0x7f, 0xab, 0xcb, 0x47, 0x4c, 0xf0, 0x2e, 0x31, 0x17, 0x53, 0x4b, 0x4c, + 0xf4, 0xa5, 0x40, 0x3e, 0x00, 0x10, 0x13, 0x2f, 0xf5, 0x83, 0x33, 0x6a, 0xe8, 0x2d, 0xd7, 0x16, + 0x93, 0x3e, 0x3a, 0xd4, 0x9d, 0x26, 0x26, 0x1e, 0x15, 0x82, 0x8b, 0xcc, 0xf4, 0xbe, 0x2d, 0x26, + 0x5f, 0x6a, 0xdb, 0xc4, 0x8e, 0x04, 0x57, 0x13, 0x88, 0x79, 0x06, 0xb6, 0x98, 0xbc, 0x40, 0x87, + 0xca, 0x2a, 0x8b, 0xac, 0x38, 0xf0, 0xb6, 0x64, 0x95, 0x55, 0x56, 0x59, 0x71, 0xe0, 0xb5, 0x65, + 0x3d, 0xab, 0x2c, 0xb3, 0xe2, 0xcc, 0xdb, 0x96, 0xb5, 0xac, 0xb2, 0xca, 0x6a, 0x17, 0xb1, 0x26, + 0x6b, 0xef, 0xef, 0x0d, 0xe8, 0xd4, 0xbe, 0x4e, 0x55, 0x03, 0x13, 0xe1, 0x65, 0x11, 0xa5, 0xa9, + 0xfa, 0x90, 0xc2, 0xab, 0x1b, 0x12, 0x31, 0x30, 0x1e, 0xc5, 0x97, 0x08, 0x4f, 0xe4, 0x49, 0x52, + 0x7c, 0x68, 0x59, 0xae, 0x9d, 0x08, 0x17, 0x1d, 0x66, 0x39, 0x93, 0x98, 0xae, 0x59, 0x2c, 0x0f, + 0xd0, 0x41, 0x7e, 0x09, 0x24, 0x11, 0x5e, 0x9e, 0xb0, 0x44, 0x52, 0x21, 0xf2, 0x54, 0xb2, 0x61, + 0xf9, 0x51, 0xb0, 0x9e, 0x88, 0xd3, 0xd9, 0x05, 0xf2, 0x48, 0xb3, 0x99, 0xcb, 0xc6, 0xb4, 0xac, + 0x9d, 0x88, 0x57, 0xfa, 0xe6, 0x38, 0x70, 0x7e, 0xf8, 0x71, 0xeb, 0xbd, 0x7f, 0xff, 0xb8, 0xf5, + 0xde, 0x9f, 0xa7, 0x5b, 0x8d, 0x1f, 0xa6, 0x5b, 0x8d, 0x7f, 0x4c, 0xb7, 0x1a, 0xff, 0x9d, 0x6e, + 0x35, 0x86, 0x4b, 0xfa, 0xcf, 0x95, 0x5f, 0xfd, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x4e, 0x24, + 0x22, 0xc4, 0x11, 0x00, 0x00, +} + +func (m *Metrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MemoryOomControl != nil { + { + size, err := m.MemoryOomControl.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.CgroupStats != nil { + { + size, err := m.CgroupStats.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.Network) > 0 { + for iNdEx := len(m.Network) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Network[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.Rdma != nil { + { + size, err := m.Rdma.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Blkio != nil { + { + size, err := m.Blkio.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.CPU != nil { + { + size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Pids != 
nil { + { + size, err := m.Pids.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Hugetlb) > 0 { + for iNdEx := len(m.Hugetlb) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Hugetlb[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HugetlbStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HugetlbStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HugetlbStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Pagesize) > 0 { + i -= len(m.Pagesize) + copy(dAtA[i:], m.Pagesize) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize))) + i-- + dAtA[i] = 0x22 + } + if m.Failcnt != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) + i-- + dAtA[i] = 0x18 + } + if m.Max != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + } + if m.Usage != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PidsStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PidsStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Limit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x10 + } + if m.Current != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Current)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CPUStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CPUStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Throttling != nil { + { + size, err := m.Throttling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Usage != nil { + { + size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CPUUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CPUUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PerCPU) > 0 { + dAtA11 := make([]byte, len(m.PerCPU)*10) + var j10 int + for _, num := range m.PerCPU { + for num >= 1<<7 { + dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j10++ + } + dAtA11[j10] = uint8(num) + j10++ + } + i -= j10 + copy(dAtA[i:], dAtA11[:j10]) + i = encodeVarintMetrics(dAtA, i, uint64(j10)) + i-- + dAtA[i] = 0x22 + } + if m.User != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.User)) + i-- + dAtA[i] = 0x18 + } + if m.Kernel != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel)) + i-- + dAtA[i] = 0x10 + } + if m.Total != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Throttle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Throttle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Throttle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ThrottledTime != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledTime)) + i-- + dAtA[i] = 0x18 + } + if m.ThrottledPeriods != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods)) + i-- + dAtA[i] = 0x10 + } + if m.Periods != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Periods)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemoryStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.KernelTCP != nil { + { + size, err := m.KernelTCP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if m.Kernel != nil { + { + size, err := m.Kernel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if m.Swap != nil { + { + size, err := m.Swap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if m.Usage != nil { + { + size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0x2 + i-- + dAtA[i] = 0x8a + } + if m.TotalUnevictable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x80 + } + if m.TotalActiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf8 + } + if m.TotalInactiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.TotalActiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + } + if m.TotalInactiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveAnon)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe0 + } + if m.TotalPgMajFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.TotalPgFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if m.TotalPgPgOut != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } + if m.TotalPgPgIn != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgIn)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 + } + if m.TotalWriteback != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if m.TotalDirty != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if m.TotalMappedFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalMappedFile)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.TotalRSSHuge != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.TotalRSS != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if m.TotalCache != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if m.HierarchicalSwapLimit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.HierarchicalMemoryLimit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.Unevictable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable)) + i-- + dAtA[i] = 0x78 + } + if m.ActiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile)) + i-- + dAtA[i] = 0x70 + } + if m.InactiveFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile)) + i-- + dAtA[i] = 0x68 + } + if m.ActiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon)) + i-- + dAtA[i] = 0x60 + } + if m.InactiveAnon != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon)) + i-- + dAtA[i] = 0x58 + } + if m.PgMajFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault)) + i-- + dAtA[i] = 0x50 + } + if m.PgFault != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault)) + i-- + dAtA[i] = 0x48 + } + if m.PgPgOut != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut)) + i-- + dAtA[i] = 0x40 + } + if m.PgPgIn != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn)) + i-- + dAtA[i] = 0x38 + } + if m.Writeback != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback)) + i-- + dAtA[i] = 0x30 + } + if m.Dirty != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty)) 
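			// The i--/dAtA[i] pair that follows prepends the key byte for
			// Dirty: 0x28 == (5 << 3) | 0, i.e. field number 5, wire type
			// 0 (varint). Every literal assigned to dAtA[i] in these
			// marshalers is such a precomputed field key, written after
			// (and therefore in front of) the value it tags.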
+ i-- + dAtA[i] = 0x28 + } + if m.MappedFile != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile)) + i-- + dAtA[i] = 0x20 + } + if m.RSSHuge != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge)) + i-- + dAtA[i] = 0x18 + } + if m.RSS != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RSS)) + i-- + dAtA[i] = 0x10 + } + if m.Cache != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Cache)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemoryEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Failcnt != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) + i-- + dAtA[i] = 0x20 + } + if m.Max != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x18 + } + if m.Usage != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) + i-- + dAtA[i] = 0x10 + } + if m.Limit != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemoryOomControl) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryOomControl) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryOomControl) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.OomKill != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.OomKill)) + i-- + dAtA[i] = 0x18 + } + if m.UnderOom != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.UnderOom)) + i-- + dAtA[i] = 0x10 + } + if m.OomKillDisable != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.OomKillDisable)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BlkIOStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlkIOStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlkIOStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.SectorsRecursive) > 0 { + for iNdEx := len(m.SectorsRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SectorsRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.IoTimeRecursive) > 0 { + for iNdEx := len(m.IoTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.IoMergedRecursive) > 0 { + for iNdEx := len(m.IoMergedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoMergedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.IoWaitTimeRecursive) > 0 { + for iNdEx := len(m.IoWaitTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoWaitTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.IoServiceTimeRecursive) > 0 { + for iNdEx := len(m.IoServiceTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServiceTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.IoQueuedRecursive) > 0 { + for iNdEx := len(m.IoQueuedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoQueuedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.IoServicedRecursive) > 0 { + for iNdEx := len(m.IoServicedRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServicedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.IoServiceBytesRecursive) > 0 { + for iNdEx := len(m.IoServiceBytesRecursive) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IoServiceBytesRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlkIOEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlkIOEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x28 + } + if m.Minor != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Minor)) + i-- + dAtA[i] = 0x20 + } + if m.Major != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Major)) + i-- + dAtA[i] = 0x18 + } + if len(m.Device) > 0 { + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x12 + } + if len(m.Op) > 0 { + i -= len(m.Op) + copy(dAtA[i:], m.Op) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RdmaStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RdmaStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
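	// Marshal, MarshalTo and MarshalToSizedBuffer form the usual
	// gogofaster trio: the message is rendered from the end of the
	// buffer toward the front, so the byte length of each nested entry
	// is already known when its length-prefix varint gets written,
	// avoiding a second sizing pass over the tree.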
+} + +func (m *RdmaStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Limit) > 0 { + for iNdEx := len(m.Limit) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Limit[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Current) > 0 { + for iNdEx := len(m.Current) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Current[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RdmaEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RdmaEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.HcaObjects != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HcaObjects)) + i-- + dAtA[i] = 0x18 + } + if m.HcaHandles != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.HcaHandles)) + i-- + dAtA[i] = 0x10 + } + if len(m.Device) > 0 { + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkStat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TxDropped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped)) + i-- + dAtA[i] = 0x48 + } + if m.TxErrors != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors)) + i-- + dAtA[i] = 0x40 + } + if m.TxPackets != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets)) + i-- + dAtA[i] = 0x38 + } + if m.TxBytes != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes)) + i-- + dAtA[i] = 0x30 + } + if m.RxDropped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxDropped)) + i-- + dAtA[i] = 0x28 + } + if m.RxErrors != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors)) + i-- + dAtA[i] = 0x20 + } + if m.RxPackets != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxPackets)) + i-- + dAtA[i] = 0x18 + } + if m.RxBytes != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CgroupStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + 
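	// Marshal allocates exactly Size() bytes and MarshalToSizedBuffer
	// fills them from back to front; because Size() is exact, n equals
	// size and dAtA[:n] below is the complete encoding.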
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CgroupStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.NrIoWait != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrIoWait)) + i-- + dAtA[i] = 0x28 + } + if m.NrUninterruptible != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrUninterruptible)) + i-- + dAtA[i] = 0x20 + } + if m.NrStopped != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrStopped)) + i-- + dAtA[i] = 0x18 + } + if m.NrRunning != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrRunning)) + i-- + dAtA[i] = 0x10 + } + if m.NrSleeping != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.NrSleeping)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + offset -= sovMetrics(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Metrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hugetlb) > 0 { + for _, e := range m.Hugetlb { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Pids != nil { + l = m.Pids.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.CPU != nil { + l = m.CPU.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Memory != nil { + l = m.Memory.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Blkio != nil { + l = m.Blkio.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Rdma != nil { + l = m.Rdma.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if len(m.Network) > 0 { + for _, e := range m.Network { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.CgroupStats != nil { + l = m.CgroupStats.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.MemoryOomControl != nil { + l = m.MemoryOomControl.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HugetlbStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Usage != 0 { + n += 1 + sovMetrics(uint64(m.Usage)) + } + if m.Max != 0 { + n += 1 + sovMetrics(uint64(m.Max)) + } + if m.Failcnt != 0 { + n += 1 + sovMetrics(uint64(m.Failcnt)) + } + l = len(m.Pagesize) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PidsStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Current != 0 { + n += 1 + sovMetrics(uint64(m.Current)) + } + if m.Limit != 0 { + n += 1 + sovMetrics(uint64(m.Limit)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CPUStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Usage != nil { + l = m.Usage.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Throttling != nil { + l = m.Throttling.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CPUUsage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Total != 0 { + n += 1 + sovMetrics(uint64(m.Total)) + } + if m.Kernel != 0 { + n += 1 + 
sovMetrics(uint64(m.Kernel)) + } + if m.User != 0 { + n += 1 + sovMetrics(uint64(m.User)) + } + if len(m.PerCPU) > 0 { + l = 0 + for _, e := range m.PerCPU { + l += sovMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Throttle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Periods != 0 { + n += 1 + sovMetrics(uint64(m.Periods)) + } + if m.ThrottledPeriods != 0 { + n += 1 + sovMetrics(uint64(m.ThrottledPeriods)) + } + if m.ThrottledTime != 0 { + n += 1 + sovMetrics(uint64(m.ThrottledTime)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemoryStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cache != 0 { + n += 1 + sovMetrics(uint64(m.Cache)) + } + if m.RSS != 0 { + n += 1 + sovMetrics(uint64(m.RSS)) + } + if m.RSSHuge != 0 { + n += 1 + sovMetrics(uint64(m.RSSHuge)) + } + if m.MappedFile != 0 { + n += 1 + sovMetrics(uint64(m.MappedFile)) + } + if m.Dirty != 0 { + n += 1 + sovMetrics(uint64(m.Dirty)) + } + if m.Writeback != 0 { + n += 1 + sovMetrics(uint64(m.Writeback)) + } + if m.PgPgIn != 0 { + n += 1 + sovMetrics(uint64(m.PgPgIn)) + } + if m.PgPgOut != 0 { + n += 1 + sovMetrics(uint64(m.PgPgOut)) + } + if m.PgFault != 0 { + n += 1 + sovMetrics(uint64(m.PgFault)) + } + if m.PgMajFault != 0 { + n += 1 + sovMetrics(uint64(m.PgMajFault)) + } + if m.InactiveAnon != 0 { + n += 1 + sovMetrics(uint64(m.InactiveAnon)) + } + if m.ActiveAnon != 0 { + n += 1 + sovMetrics(uint64(m.ActiveAnon)) + } + if m.InactiveFile != 0 { + n += 1 + sovMetrics(uint64(m.InactiveFile)) + } + if m.ActiveFile != 0 { + n += 1 + sovMetrics(uint64(m.ActiveFile)) + } + if m.Unevictable != 0 { + n += 1 + sovMetrics(uint64(m.Unevictable)) + } + if m.HierarchicalMemoryLimit != 0 { + n += 2 + sovMetrics(uint64(m.HierarchicalMemoryLimit)) + } + if m.HierarchicalSwapLimit != 0 { + n += 2 + sovMetrics(uint64(m.HierarchicalSwapLimit)) + } + if m.TotalCache != 0 { + n += 2 + sovMetrics(uint64(m.TotalCache)) + } + if m.TotalRSS != 0 { + n += 2 + sovMetrics(uint64(m.TotalRSS)) + } + if m.TotalRSSHuge != 0 { + n += 2 + sovMetrics(uint64(m.TotalRSSHuge)) + } + if m.TotalMappedFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalMappedFile)) + } + if m.TotalDirty != 0 { + n += 2 + sovMetrics(uint64(m.TotalDirty)) + } + if m.TotalWriteback != 0 { + n += 2 + sovMetrics(uint64(m.TotalWriteback)) + } + if m.TotalPgPgIn != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgPgIn)) + } + if m.TotalPgPgOut != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgPgOut)) + } + if m.TotalPgFault != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgFault)) + } + if m.TotalPgMajFault != 0 { + n += 2 + sovMetrics(uint64(m.TotalPgMajFault)) + } + if m.TotalInactiveAnon != 0 { + n += 2 + sovMetrics(uint64(m.TotalInactiveAnon)) + } + if m.TotalActiveAnon != 0 { + n += 2 + sovMetrics(uint64(m.TotalActiveAnon)) + } + if m.TotalInactiveFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalInactiveFile)) + } + if m.TotalActiveFile != 0 { + n += 2 + sovMetrics(uint64(m.TotalActiveFile)) + } + if m.TotalUnevictable != 0 { + n += 2 + sovMetrics(uint64(m.TotalUnevictable)) + } + if m.Usage != nil { + l = m.Usage.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.Swap != nil { + l = m.Swap.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.Kernel != nil { + l = m.Kernel.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + if m.KernelTCP != nil { + l = m.KernelTCP.Size() + n += 2 + l 
+ sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemoryEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Limit != 0 { + n += 1 + sovMetrics(uint64(m.Limit)) + } + if m.Usage != 0 { + n += 1 + sovMetrics(uint64(m.Usage)) + } + if m.Max != 0 { + n += 1 + sovMetrics(uint64(m.Max)) + } + if m.Failcnt != 0 { + n += 1 + sovMetrics(uint64(m.Failcnt)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MemoryOomControl) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OomKillDisable != 0 { + n += 1 + sovMetrics(uint64(m.OomKillDisable)) + } + if m.UnderOom != 0 { + n += 1 + sovMetrics(uint64(m.UnderOom)) + } + if m.OomKill != 0 { + n += 1 + sovMetrics(uint64(m.OomKill)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BlkIOStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.IoServiceBytesRecursive) > 0 { + for _, e := range m.IoServiceBytesRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoServicedRecursive) > 0 { + for _, e := range m.IoServicedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoQueuedRecursive) > 0 { + for _, e := range m.IoQueuedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoServiceTimeRecursive) > 0 { + for _, e := range m.IoServiceTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoWaitTimeRecursive) > 0 { + for _, e := range m.IoWaitTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoMergedRecursive) > 0 { + for _, e := range m.IoMergedRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.IoTimeRecursive) > 0 { + for _, e := range m.IoTimeRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.SectorsRecursive) > 0 { + for _, e := range m.SectorsRecursive { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BlkIOEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Op) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Device) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Major != 0 { + n += 1 + sovMetrics(uint64(m.Major)) + } + if m.Minor != 0 { + n += 1 + sovMetrics(uint64(m.Minor)) + } + if m.Value != 0 { + n += 1 + sovMetrics(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RdmaStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Current) > 0 { + for _, e := range m.Current { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.Limit) > 0 { + for _, e := range m.Limit { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RdmaEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Device) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.HcaHandles != 0 { + n += 1 + sovMetrics(uint64(m.HcaHandles)) + } + if m.HcaObjects != 0 { + n += 1 + sovMetrics(uint64(m.HcaObjects)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*NetworkStat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.RxBytes != 0 { + n += 1 + sovMetrics(uint64(m.RxBytes)) + } + if m.RxPackets != 0 { + n += 1 + sovMetrics(uint64(m.RxPackets)) + } + if m.RxErrors != 0 { + n += 1 + sovMetrics(uint64(m.RxErrors)) + } + if m.RxDropped != 0 { + n += 1 + sovMetrics(uint64(m.RxDropped)) + } + if m.TxBytes != 0 { + n += 1 + sovMetrics(uint64(m.TxBytes)) + } + if m.TxPackets != 0 { + n += 1 + sovMetrics(uint64(m.TxPackets)) + } + if m.TxErrors != 0 { + n += 1 + sovMetrics(uint64(m.TxErrors)) + } + if m.TxDropped != 0 { + n += 1 + sovMetrics(uint64(m.TxDropped)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CgroupStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NrSleeping != 0 { + n += 1 + sovMetrics(uint64(m.NrSleeping)) + } + if m.NrRunning != 0 { + n += 1 + sovMetrics(uint64(m.NrRunning)) + } + if m.NrStopped != 0 { + n += 1 + sovMetrics(uint64(m.NrStopped)) + } + if m.NrUninterruptible != 0 { + n += 1 + sovMetrics(uint64(m.NrUninterruptible)) + } + if m.NrIoWait != 0 { + n += 1 + sovMetrics(uint64(m.NrIoWait)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMetrics(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetrics(x uint64) (n int) { + return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Metrics) String() string { + if this == nil { + return "nil" + } + repeatedStringForHugetlb := "[]*HugetlbStat{" + for _, f := range this.Hugetlb { + repeatedStringForHugetlb += strings.Replace(f.String(), "HugetlbStat", "HugetlbStat", 1) + "," + } + repeatedStringForHugetlb += "}" + repeatedStringForNetwork := "[]*NetworkStat{" + for _, f := range this.Network { + repeatedStringForNetwork += strings.Replace(f.String(), "NetworkStat", "NetworkStat", 1) + "," + } + repeatedStringForNetwork += "}" + s := strings.Join([]string{`&Metrics{`, + `Hugetlb:` + repeatedStringForHugetlb + `,`, + `Pids:` + strings.Replace(this.Pids.String(), "PidsStat", "PidsStat", 1) + `,`, + `CPU:` + strings.Replace(this.CPU.String(), "CPUStat", "CPUStat", 1) + `,`, + `Memory:` + strings.Replace(this.Memory.String(), "MemoryStat", "MemoryStat", 1) + `,`, + `Blkio:` + strings.Replace(this.Blkio.String(), "BlkIOStat", "BlkIOStat", 1) + `,`, + `Rdma:` + strings.Replace(this.Rdma.String(), "RdmaStat", "RdmaStat", 1) + `,`, + `Network:` + repeatedStringForNetwork + `,`, + `CgroupStats:` + strings.Replace(this.CgroupStats.String(), "CgroupStats", "CgroupStats", 1) + `,`, + `MemoryOomControl:` + strings.Replace(this.MemoryOomControl.String(), "MemoryOomControl", "MemoryOomControl", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *HugetlbStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HugetlbStat{`, + `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, + `Pagesize:` + fmt.Sprintf("%v", this.Pagesize) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *PidsStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PidsStat{`, + `Current:` + fmt.Sprintf("%v", this.Current) + `,`, + `Limit:` + 
fmt.Sprintf("%v", this.Limit) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *CPUStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CPUStat{`, + `Usage:` + strings.Replace(this.Usage.String(), "CPUUsage", "CPUUsage", 1) + `,`, + `Throttling:` + strings.Replace(this.Throttling.String(), "Throttle", "Throttle", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *CPUUsage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CPUUsage{`, + `Total:` + fmt.Sprintf("%v", this.Total) + `,`, + `Kernel:` + fmt.Sprintf("%v", this.Kernel) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `PerCPU:` + fmt.Sprintf("%v", this.PerCPU) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Throttle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Throttle{`, + `Periods:` + fmt.Sprintf("%v", this.Periods) + `,`, + `ThrottledPeriods:` + fmt.Sprintf("%v", this.ThrottledPeriods) + `,`, + `ThrottledTime:` + fmt.Sprintf("%v", this.ThrottledTime) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryStat{`, + `Cache:` + fmt.Sprintf("%v", this.Cache) + `,`, + `RSS:` + fmt.Sprintf("%v", this.RSS) + `,`, + `RSSHuge:` + fmt.Sprintf("%v", this.RSSHuge) + `,`, + `MappedFile:` + fmt.Sprintf("%v", this.MappedFile) + `,`, + `Dirty:` + fmt.Sprintf("%v", this.Dirty) + `,`, + `Writeback:` + fmt.Sprintf("%v", this.Writeback) + `,`, + `PgPgIn:` + fmt.Sprintf("%v", this.PgPgIn) + `,`, + `PgPgOut:` + fmt.Sprintf("%v", this.PgPgOut) + `,`, + `PgFault:` + fmt.Sprintf("%v", this.PgFault) + `,`, + `PgMajFault:` + fmt.Sprintf("%v", this.PgMajFault) + `,`, + `InactiveAnon:` + fmt.Sprintf("%v", this.InactiveAnon) + `,`, + `ActiveAnon:` + fmt.Sprintf("%v", this.ActiveAnon) + `,`, + `InactiveFile:` + fmt.Sprintf("%v", this.InactiveFile) + `,`, + `ActiveFile:` + fmt.Sprintf("%v", this.ActiveFile) + `,`, + `Unevictable:` + fmt.Sprintf("%v", this.Unevictable) + `,`, + `HierarchicalMemoryLimit:` + fmt.Sprintf("%v", this.HierarchicalMemoryLimit) + `,`, + `HierarchicalSwapLimit:` + fmt.Sprintf("%v", this.HierarchicalSwapLimit) + `,`, + `TotalCache:` + fmt.Sprintf("%v", this.TotalCache) + `,`, + `TotalRSS:` + fmt.Sprintf("%v", this.TotalRSS) + `,`, + `TotalRSSHuge:` + fmt.Sprintf("%v", this.TotalRSSHuge) + `,`, + `TotalMappedFile:` + fmt.Sprintf("%v", this.TotalMappedFile) + `,`, + `TotalDirty:` + fmt.Sprintf("%v", this.TotalDirty) + `,`, + `TotalWriteback:` + fmt.Sprintf("%v", this.TotalWriteback) + `,`, + `TotalPgPgIn:` + fmt.Sprintf("%v", this.TotalPgPgIn) + `,`, + `TotalPgPgOut:` + fmt.Sprintf("%v", this.TotalPgPgOut) + `,`, + `TotalPgFault:` + fmt.Sprintf("%v", this.TotalPgFault) + `,`, + `TotalPgMajFault:` + fmt.Sprintf("%v", this.TotalPgMajFault) + `,`, + `TotalInactiveAnon:` + fmt.Sprintf("%v", this.TotalInactiveAnon) + `,`, + `TotalActiveAnon:` + fmt.Sprintf("%v", this.TotalActiveAnon) + `,`, + `TotalInactiveFile:` + fmt.Sprintf("%v", this.TotalInactiveFile) + `,`, + `TotalActiveFile:` + fmt.Sprintf("%v", this.TotalActiveFile) + `,`, + `TotalUnevictable:` + fmt.Sprintf("%v", this.TotalUnevictable) + `,`, + `Usage:` + 
strings.Replace(this.Usage.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `Swap:` + strings.Replace(this.Swap.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `Kernel:` + strings.Replace(this.Kernel.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `KernelTCP:` + strings.Replace(this.KernelTCP.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryEntry{`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryOomControl) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryOomControl{`, + `OomKillDisable:` + fmt.Sprintf("%v", this.OomKillDisable) + `,`, + `UnderOom:` + fmt.Sprintf("%v", this.UnderOom) + `,`, + `OomKill:` + fmt.Sprintf("%v", this.OomKill) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BlkIOStat) String() string { + if this == nil { + return "nil" + } + repeatedStringForIoServiceBytesRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServiceBytesRecursive { + repeatedStringForIoServiceBytesRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServiceBytesRecursive += "}" + repeatedStringForIoServicedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServicedRecursive { + repeatedStringForIoServicedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServicedRecursive += "}" + repeatedStringForIoQueuedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoQueuedRecursive { + repeatedStringForIoQueuedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoQueuedRecursive += "}" + repeatedStringForIoServiceTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoServiceTimeRecursive { + repeatedStringForIoServiceTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoServiceTimeRecursive += "}" + repeatedStringForIoWaitTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoWaitTimeRecursive { + repeatedStringForIoWaitTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoWaitTimeRecursive += "}" + repeatedStringForIoMergedRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoMergedRecursive { + repeatedStringForIoMergedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoMergedRecursive += "}" + repeatedStringForIoTimeRecursive := "[]*BlkIOEntry{" + for _, f := range this.IoTimeRecursive { + repeatedStringForIoTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForIoTimeRecursive += "}" + repeatedStringForSectorsRecursive := "[]*BlkIOEntry{" + for _, f := range this.SectorsRecursive { + repeatedStringForSectorsRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," + } + repeatedStringForSectorsRecursive += "}" + s := strings.Join([]string{`&BlkIOStat{`, + `IoServiceBytesRecursive:` + 
repeatedStringForIoServiceBytesRecursive + `,`, + `IoServicedRecursive:` + repeatedStringForIoServicedRecursive + `,`, + `IoQueuedRecursive:` + repeatedStringForIoQueuedRecursive + `,`, + `IoServiceTimeRecursive:` + repeatedStringForIoServiceTimeRecursive + `,`, + `IoWaitTimeRecursive:` + repeatedStringForIoWaitTimeRecursive + `,`, + `IoMergedRecursive:` + repeatedStringForIoMergedRecursive + `,`, + `IoTimeRecursive:` + repeatedStringForIoTimeRecursive + `,`, + `SectorsRecursive:` + repeatedStringForSectorsRecursive + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BlkIOEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BlkIOEntry{`, + `Op:` + fmt.Sprintf("%v", this.Op) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `Major:` + fmt.Sprintf("%v", this.Major) + `,`, + `Minor:` + fmt.Sprintf("%v", this.Minor) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *RdmaStat) String() string { + if this == nil { + return "nil" + } + repeatedStringForCurrent := "[]*RdmaEntry{" + for _, f := range this.Current { + repeatedStringForCurrent += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," + } + repeatedStringForCurrent += "}" + repeatedStringForLimit := "[]*RdmaEntry{" + for _, f := range this.Limit { + repeatedStringForLimit += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," + } + repeatedStringForLimit += "}" + s := strings.Join([]string{`&RdmaStat{`, + `Current:` + repeatedStringForCurrent + `,`, + `Limit:` + repeatedStringForLimit + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *RdmaEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RdmaEntry{`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `HcaHandles:` + fmt.Sprintf("%v", this.HcaHandles) + `,`, + `HcaObjects:` + fmt.Sprintf("%v", this.HcaObjects) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkStat) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkStat{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RxBytes:` + fmt.Sprintf("%v", this.RxBytes) + `,`, + `RxPackets:` + fmt.Sprintf("%v", this.RxPackets) + `,`, + `RxErrors:` + fmt.Sprintf("%v", this.RxErrors) + `,`, + `RxDropped:` + fmt.Sprintf("%v", this.RxDropped) + `,`, + `TxBytes:` + fmt.Sprintf("%v", this.TxBytes) + `,`, + `TxPackets:` + fmt.Sprintf("%v", this.TxPackets) + `,`, + `TxErrors:` + fmt.Sprintf("%v", this.TxErrors) + `,`, + `TxDropped:` + fmt.Sprintf("%v", this.TxDropped) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *CgroupStats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupStats{`, + `NrSleeping:` + fmt.Sprintf("%v", this.NrSleeping) + `,`, + `NrRunning:` + fmt.Sprintf("%v", this.NrRunning) + `,`, + `NrStopped:` + fmt.Sprintf("%v", this.NrStopped) + `,`, + `NrUninterruptible:` + fmt.Sprintf("%v", this.NrUninterruptible) + `,`, + `NrIoWait:` + fmt.Sprintf("%v", this.NrIoWait) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringMetrics(v interface{}) string { + rv 
:= reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Metrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hugetlb = append(m.Hugetlb, &HugetlbStat{}) + if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pids == nil { + m.Pids = &PidsStat{} + } + if err := m.Pids.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CPU == nil { + m.CPU = &CPUStat{} + } + if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory 
= &MemoryStat{} + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blkio", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Blkio == nil { + m.Blkio = &BlkIOStat{} + } + if err := m.Blkio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rdma", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Rdma == nil { + m.Rdma = &RdmaStat{} + } + if err := m.Rdma.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Network = append(m.Network, &NetworkStat{}) + if err := m.Network[len(m.Network)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupStats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CgroupStats == nil { + m.CgroupStats = &CgroupStats{} + } + if err := m.CgroupStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryOomControl", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + 
} + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MemoryOomControl == nil { + m.MemoryOomControl = &MemoryOomControl{} + } + if err := m.MemoryOomControl.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HugetlbStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HugetlbStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HugetlbStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + m.Usage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Usage |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) + } + m.Failcnt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failcnt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pagesize = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PidsStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PidsStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PidsStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + m.Current = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Current |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CPUStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPUStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPUStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = &CPUUsage{} + } + if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Throttling == nil { + m.Throttling = &Throttle{} + } + if err := m.Throttling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CPUUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPUUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPUUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) + } + m.Kernel = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kernel |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + m.User = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.User |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PerCPU = append(m.PerCPU, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PerCPU) == 0 { + m.PerCPU = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PerCPU = append(m.PerCPU, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PerCPU", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Throttle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Throttle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Throttle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Periods", wireType) + } + m.Periods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Periods |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledPeriods", wireType) + } + m.ThrottledPeriods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ThrottledPeriods |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledTime", wireType) + } + m.ThrottledTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ThrottledTime |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) + } + m.Cache = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cache |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RSS", wireType) + } + m.RSS = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RSS |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RSSHuge", wireType) + } + m.RSSHuge = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RSSHuge |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MappedFile", wireType) + } + m.MappedFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MappedFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Dirty", wireType) + } + m.Dirty = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Dirty |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Writeback", wireType) + } + m.Writeback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Writeback |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgPgIn", wireType) + } + m.PgPgIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgPgIn |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgPgOut", wireType) + } + m.PgPgOut = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgPgOut |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgFault", wireType) + } + m.PgFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PgMajFault", wireType) + } + m.PgMajFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PgMajFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactiveAnon", wireType) + } + m.InactiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveAnon", wireType) + } + m.ActiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactiveFile", wireType) + } + m.InactiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveFile", wireType) + } + m.ActiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unevictable", wireType) + } + m.Unevictable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Unevictable |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalMemoryLimit", wireType) + } + m.HierarchicalMemoryLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HierarchicalMemoryLimit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalSwapLimit", wireType) + } + m.HierarchicalSwapLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HierarchicalSwapLimit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalCache", wireType) + } + m.TotalCache = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalCache |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalRSS", wireType) + } + m.TotalRSS = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalRSS |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalRSSHuge", wireType) + } + m.TotalRSSHuge = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalRSSHuge |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalMappedFile", wireType) + } + m.TotalMappedFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalMappedFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalDirty", wireType) + } + m.TotalDirty = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalDirty |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 23: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalWriteback", wireType) + } + m.TotalWriteback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalWriteback |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 24: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgIn", wireType) + } + m.TotalPgPgIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgPgIn |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgOut", wireType) + } + m.TotalPgPgOut = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgPgOut |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 26: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgFault", wireType) + } + m.TotalPgFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.TotalPgFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalPgMajFault", wireType) + } + m.TotalPgMajFault = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalPgMajFault |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 28: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveAnon", wireType) + } + m.TotalInactiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalInactiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 29: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveAnon", wireType) + } + m.TotalActiveAnon = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalActiveAnon |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveFile", wireType) + } + m.TotalInactiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalInactiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 31: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveFile", wireType) + } + m.TotalActiveFile = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalActiveFile |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 32: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalUnevictable", wireType) + } + m.TotalUnevictable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalUnevictable |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = &MemoryEntry{} + } + if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Swap == nil { + m.Swap = &MemoryEntry{} + } + if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kernel == nil { + m.Kernel = &MemoryEntry{} + } + if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KernelTCP == nil { + m.KernelTCP = &MemoryEntry{} + } + if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + m.Usage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Usage |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) + } + m.Failcnt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failcnt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
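The MemoryEntry decoder above is the target type for the length-delimited fields 33–36 of MemoryStat (usage, swap, kernel, kernel_tcp). For wire type 2 the generated code first decodes a varint length, bounds-checks the resulting postIndex against both negative overflow and the buffer length, lazily allocates the field, and then recurses into the sub-slice. A simplified sketch of that shape (illustrative names; a single-byte length prefix stands in for the full varint for brevity):

```go
// Editorial sketch of embedded-message decoding as done in metrics.pb.go.
package main

import (
	"errors"
	"fmt"
)

type memoryEntry struct{ Limit, Usage uint64 }

// unmarshalEntry stands in for MemoryEntry.Unmarshal; a real decoder
// parses tagged fields out of buf.
func unmarshalEntry(e *memoryEntry, buf []byte) error { _ = buf; return nil }

func decodeEmbedded(dst **memoryEntry, data []byte, index int) (int, error) {
	if index >= len(data) {
		return 0, errors.New("unexpected EOF")
	}
	msglen := int(data[index] & 0x7F) // length prefix (simplified)
	index++
	post := index + msglen
	if post < 0 || post > len(data) { // guard overflow and truncation
		return 0, errors.New("invalid length")
	}
	if *dst == nil { // lazy allocation, exactly as in the generated code
		*dst = &memoryEntry{}
	}
	if err := unmarshalEntry(*dst, data[index:post]); err != nil {
		return 0, err
	}
	return post, nil // caller resumes decoding at postIndex
}

func main() {
	var e *memoryEntry
	next, err := decodeEmbedded(&e, []byte{0x00}, 0)
	fmt.Println(next, err, e != nil) // 1 <nil> true
}
```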
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryOomControl) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryOomControl: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryOomControl: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OomKillDisable", wireType) + } + m.OomKillDisable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OomKillDisable |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnderOom", wireType) + } + m.UnderOom = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnderOom |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OomKill", wireType) + } + m.OomKill = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OomKill |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
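The default branch that closes each switch above is what gives these decoders forward compatibility: tags the decoder does not recognize are measured with skipMetrics and their raw bytes appended to XXX_unrecognized, so an unmarshal/marshal round trip does not silently drop fields written by a newer producer. A toy version of the skipping logic (editorial sketch; the vendored skipMetrics also handles groups and the fixed 8- and 4-byte wire types):

```go
// Editorial sketch of unknown-field skipping and preservation.
package main

import (
	"errors"
	"fmt"
)

// skipField returns the size of a field body for the two wire types
// that dominate this file (0 = varint, 2 = length-delimited).
func skipField(wireType int, data []byte) (int, error) {
	switch wireType {
	case 0: // varint: consume bytes until the continuation bit clears
		for i, b := range data {
			if b < 0x80 {
				return i + 1, nil
			}
		}
		return 0, errors.New("unexpected EOF")
	case 2: // length-delimited (one-byte length prefix in this sketch)
		if len(data) == 0 {
			return 0, errors.New("unexpected EOF")
		}
		n := int(data[0])
		if 1+n > len(data) {
			return 0, errors.New("unexpected EOF")
		}
		return 1 + n, nil
	default:
		return 0, fmt.Errorf("unhandled wire type %d", wireType)
	}
}

func main() {
	var unrecognized []byte
	data := []byte{0xAC, 0x02, 0xFF} // a varint body, then unrelated data
	n, _ := skipField(0, data)
	unrecognized = append(unrecognized, data[:n]...) // keep the raw bytes
	fmt.Println(n, unrecognized)                     // 2 [172 2]
}
```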
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlkIOStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlkIOStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlkIOStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServiceBytesRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServiceBytesRecursive = append(m.IoServiceBytesRecursive, &BlkIOEntry{}) + if err := m.IoServiceBytesRecursive[len(m.IoServiceBytesRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServicedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServicedRecursive = append(m.IoServicedRecursive, &BlkIOEntry{}) + if err := m.IoServicedRecursive[len(m.IoServicedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoQueuedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoQueuedRecursive = append(m.IoQueuedRecursive, &BlkIOEntry{}) + if err := m.IoQueuedRecursive[len(m.IoQueuedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoServiceTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoServiceTimeRecursive = append(m.IoServiceTimeRecursive, &BlkIOEntry{}) + if err := m.IoServiceTimeRecursive[len(m.IoServiceTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoWaitTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoWaitTimeRecursive = append(m.IoWaitTimeRecursive, &BlkIOEntry{}) + if err := m.IoWaitTimeRecursive[len(m.IoWaitTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoMergedRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoMergedRecursive = append(m.IoMergedRecursive, &BlkIOEntry{}) + if err := m.IoMergedRecursive[len(m.IoMergedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IoTimeRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IoTimeRecursive = append(m.IoTimeRecursive, &BlkIOEntry{}) + if err := m.IoTimeRecursive[len(m.IoTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SectorsRecursive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SectorsRecursive = append(m.SectorsRecursive, &BlkIOEntry{}) + if err := m.SectorsRecursive[len(m.SectorsRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlkIOEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlkIOEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlkIOEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Op = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType) + } + m.Major = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Major |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType) + } + m.Minor = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Minor |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RdmaStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RdmaStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RdmaStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Current = append(m.Current, &RdmaEntry{}) + if err := m.Current[len(m.Current)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Limit = append(m.Limit, &RdmaEntry{}) + if err := m.Limit[len(m.Limit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
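For repeated message fields — the BlkIOEntry lists above and the RdmaEntry lists here — each occurrence of the tag on the wire appends one new element and then unmarshals into the slot just added, so slice order follows wire order. The pattern, reduced to a sketch with stand-in types (not the vendored definitions):

```go
// Editorial sketch of repeated-field decoding as in BlkIOStat/RdmaStat.
package main

import "fmt"

type rdmaEntry struct{ Device string }

// unmarshal is a toy body; the real code parses tagged fields.
func (e *rdmaEntry) unmarshal(buf []byte) error {
	e.Device = string(buf)
	return nil
}

type rdmaStat struct{ Current []*rdmaEntry }

// addCurrent mirrors the generated idiom:
//   m.Current = append(m.Current, &RdmaEntry{})
//   m.Current[len(m.Current)-1].Unmarshal(...)
func (s *rdmaStat) addCurrent(buf []byte) error {
	s.Current = append(s.Current, &rdmaEntry{})
	return s.Current[len(s.Current)-1].unmarshal(buf)
}

func main() {
	var s rdmaStat
	// Two occurrences of field 1 on the wire become two slice elements.
	_ = s.addCurrent([]byte("mlx5_0"))
	_ = s.addCurrent([]byte("mlx5_1"))
	fmt.Println(len(s.Current), s.Current[0].Device, s.Current[1].Device)
}
```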
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RdmaEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RdmaEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RdmaEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HcaHandles", wireType) + } + m.HcaHandles = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HcaHandles |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HcaObjects", wireType) + } + m.HcaObjects = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HcaObjects |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkStat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkStat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkStat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType) + } + m.RxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxBytes |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxPackets", wireType) + } + m.RxPackets = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxPackets |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxErrors", wireType) + } + m.RxErrors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxErrors |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RxDropped", wireType) + } + m.RxDropped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RxDropped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType) + } + m.TxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxBytes |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxPackets", wireType) + } + m.TxPackets = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxPackets |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxErrors", wireType) + } + m.TxErrors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxErrors |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TxDropped", wireType) + } + m.TxDropped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TxDropped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CgroupStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CgroupStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CgroupStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrSleeping", wireType) + } + m.NrSleeping = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrSleeping |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrRunning", wireType) + } + m.NrRunning = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrRunning |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrStopped", wireType) + } + m.NrStopped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrStopped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrUninterruptible", wireType) + } + m.NrUninterruptible = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrUninterruptible |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 
5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrIoWait", wireType) + } + m.NrIoWait = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrIoWait |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetrics(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMetrics + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMetrics + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMetrics + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt new file mode 100644 index 00000000000..e476cea6412 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt @@ -0,0 +1,790 @@ +file { + name: "github.com/containerd/cgroups/stats/v1/metrics.proto" + package: "io.containerd.cgroups.v1" + dependency: "gogoproto/gogo.proto" + message_type { + name: "Metrics" + field { + name: "hugetlb" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.HugetlbStat" + json_name: "hugetlb" + } + field { + name: "pids" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.PidsStat" + json_name: "pids" + } + field { + name: "cpu" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CPUStat" + options { + 65004: "CPU" + } + json_name: "cpu" + } + field { + name: "memory" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryStat" + json_name: "memory" + } + field { + name: 
"blkio" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOStat" + json_name: "blkio" + } + field { + name: "rdma" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaStat" + json_name: "rdma" + } + field { + name: "network" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.NetworkStat" + json_name: "network" + } + field { + name: "cgroup_stats" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CgroupStats" + json_name: "cgroupStats" + } + field { + name: "memory_oom_control" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryOomControl" + json_name: "memoryOomControl" + } + } + message_type { + name: "HugetlbStat" + field { + name: "usage" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "usage" + } + field { + name: "max" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "max" + } + field { + name: "failcnt" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "failcnt" + } + field { + name: "pagesize" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "pagesize" + } + } + message_type { + name: "PidsStat" + field { + name: "current" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "current" + } + field { + name: "limit" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "limit" + } + } + message_type { + name: "CPUStat" + field { + name: "usage" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CPUUsage" + json_name: "usage" + } + field { + name: "throttling" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.Throttle" + json_name: "throttling" + } + } + message_type { + name: "CPUUsage" + field { + name: "total" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "total" + } + field { + name: "kernel" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "kernel" + } + field { + name: "user" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "user" + } + field { + name: "per_cpu" + number: 4 + label: LABEL_REPEATED + type: TYPE_UINT64 + options { + 65004: "PerCPU" + } + json_name: "perCpu" + } + } + message_type { + name: "Throttle" + field { + name: "periods" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "periods" + } + field { + name: "throttled_periods" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "throttledPeriods" + } + field { + name: "throttled_time" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "throttledTime" + } + } + message_type { + name: "MemoryStat" + field { + name: "cache" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "cache" + } + field { + name: "rss" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "RSS" + } + json_name: "rss" + } + field { + name: "rss_huge" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "RSSHuge" + } + json_name: "rssHuge" + } + field { + name: "mapped_file" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "mappedFile" + } + field { + name: "dirty" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "dirty" + } + field { + name: "writeback" + number: 6 + label: 
LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "writeback" + } + field { + name: "pg_pg_in" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgPgIn" + } + field { + name: "pg_pg_out" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgPgOut" + } + field { + name: "pg_fault" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgFault" + } + field { + name: "pg_maj_fault" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "pgMajFault" + } + field { + name: "inactive_anon" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "inactiveAnon" + } + field { + name: "active_anon" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "activeAnon" + } + field { + name: "inactive_file" + number: 13 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "inactiveFile" + } + field { + name: "active_file" + number: 14 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "activeFile" + } + field { + name: "unevictable" + number: 15 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "unevictable" + } + field { + name: "hierarchical_memory_limit" + number: 16 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "hierarchicalMemoryLimit" + } + field { + name: "hierarchical_swap_limit" + number: 17 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "hierarchicalSwapLimit" + } + field { + name: "total_cache" + number: 18 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalCache" + } + field { + name: "total_rss" + number: 19 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "TotalRSS" + } + json_name: "totalRss" + } + field { + name: "total_rss_huge" + number: 20 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65004: "TotalRSSHuge" + } + json_name: "totalRssHuge" + } + field { + name: "total_mapped_file" + number: 21 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalMappedFile" + } + field { + name: "total_dirty" + number: 22 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalDirty" + } + field { + name: "total_writeback" + number: 23 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalWriteback" + } + field { + name: "total_pg_pg_in" + number: 24 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgPgIn" + } + field { + name: "total_pg_pg_out" + number: 25 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgPgOut" + } + field { + name: "total_pg_fault" + number: 26 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgFault" + } + field { + name: "total_pg_maj_fault" + number: 27 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalPgMajFault" + } + field { + name: "total_inactive_anon" + number: 28 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalInactiveAnon" + } + field { + name: "total_active_anon" + number: 29 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalActiveAnon" + } + field { + name: "total_inactive_file" + number: 30 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalInactiveFile" + } + field { + name: "total_active_file" + number: 31 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalActiveFile" + } + field { + name: "total_unevictable" + number: 32 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "totalUnevictable" + } + field { + name: "usage" + number: 33 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "usage" + } + field 
{ + name: "swap" + number: 34 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "swap" + } + field { + name: "kernel" + number: 35 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + json_name: "kernel" + } + field { + name: "kernel_tcp" + number: 36 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.MemoryEntry" + options { + 65004: "KernelTCP" + } + json_name: "kernelTcp" + } + } + message_type { + name: "MemoryEntry" + field { + name: "limit" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "limit" + } + field { + name: "usage" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "usage" + } + field { + name: "max" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "max" + } + field { + name: "failcnt" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "failcnt" + } + } + message_type { + name: "MemoryOomControl" + field { + name: "oom_kill_disable" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "oomKillDisable" + } + field { + name: "under_oom" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "underOom" + } + field { + name: "oom_kill" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "oomKill" + } + } + message_type { + name: "BlkIOStat" + field { + name: "io_service_bytes_recursive" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServiceBytesRecursive" + } + field { + name: "io_serviced_recursive" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServicedRecursive" + } + field { + name: "io_queued_recursive" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioQueuedRecursive" + } + field { + name: "io_service_time_recursive" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioServiceTimeRecursive" + } + field { + name: "io_wait_time_recursive" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioWaitTimeRecursive" + } + field { + name: "io_merged_recursive" + number: 6 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioMergedRecursive" + } + field { + name: "io_time_recursive" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "ioTimeRecursive" + } + field { + name: "sectors_recursive" + number: 8 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.BlkIOEntry" + json_name: "sectorsRecursive" + } + } + message_type { + name: "BlkIOEntry" + field { + name: "op" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "op" + } + field { + name: "device" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "device" + } + field { + name: "major" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "major" + } + field { + name: "minor" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "minor" + } + field { + name: "value" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "value" + } + } + message_type { + name: 
"RdmaStat" + field { + name: "current" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaEntry" + json_name: "current" + } + field { + name: "limit" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.RdmaEntry" + json_name: "limit" + } + } + message_type { + name: "RdmaEntry" + field { + name: "device" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "device" + } + field { + name: "hca_handles" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "hcaHandles" + } + field { + name: "hca_objects" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "hcaObjects" + } + } + message_type { + name: "NetworkStat" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "rx_bytes" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxBytes" + } + field { + name: "rx_packets" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxPackets" + } + field { + name: "rx_errors" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxErrors" + } + field { + name: "rx_dropped" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "rxDropped" + } + field { + name: "tx_bytes" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txBytes" + } + field { + name: "tx_packets" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txPackets" + } + field { + name: "tx_errors" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txErrors" + } + field { + name: "tx_dropped" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "txDropped" + } + } + message_type { + name: "CgroupStats" + field { + name: "nr_sleeping" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrSleeping" + } + field { + name: "nr_running" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrRunning" + } + field { + name: "nr_stopped" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrStopped" + } + field { + name: "nr_uninterruptible" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrUninterruptible" + } + field { + name: "nr_io_wait" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrIoWait" + } + } + syntax: "proto3" +} diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto b/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto new file mode 100644 index 00000000000..b3f6cc37d8c --- /dev/null +++ b/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto @@ -0,0 +1,158 @@ +syntax = "proto3"; + +package io.containerd.cgroups.v1; + +import "gogoproto/gogo.proto"; + +message Metrics { + repeated HugetlbStat hugetlb = 1; + PidsStat pids = 2; + CPUStat cpu = 3 [(gogoproto.customname) = "CPU"]; + MemoryStat memory = 4; + BlkIOStat blkio = 5; + RdmaStat rdma = 6; + repeated NetworkStat network = 7; + CgroupStats cgroup_stats = 8; + MemoryOomControl memory_oom_control = 9; +} + +message HugetlbStat { + uint64 usage = 1; + uint64 max = 2; + uint64 failcnt = 3; + string pagesize = 4; +} + +message PidsStat { + uint64 current = 1; + uint64 limit = 2; +} + +message CPUStat { + CPUUsage usage = 1; + Throttle throttling = 2; +} + +message CPUUsage { + // values in nanoseconds + uint64 total = 1; + uint64 kernel = 2; + uint64 user = 3; + repeated uint64 per_cpu = 4 [(gogoproto.customname) = 
"PerCPU"]; + +} + +message Throttle { + uint64 periods = 1; + uint64 throttled_periods = 2; + uint64 throttled_time = 3; +} + +message MemoryStat { + uint64 cache = 1; + uint64 rss = 2 [(gogoproto.customname) = "RSS"]; + uint64 rss_huge = 3 [(gogoproto.customname) = "RSSHuge"]; + uint64 mapped_file = 4; + uint64 dirty = 5; + uint64 writeback = 6; + uint64 pg_pg_in = 7; + uint64 pg_pg_out = 8; + uint64 pg_fault = 9; + uint64 pg_maj_fault = 10; + uint64 inactive_anon = 11; + uint64 active_anon = 12; + uint64 inactive_file = 13; + uint64 active_file = 14; + uint64 unevictable = 15; + uint64 hierarchical_memory_limit = 16; + uint64 hierarchical_swap_limit = 17; + uint64 total_cache = 18; + uint64 total_rss = 19 [(gogoproto.customname) = "TotalRSS"]; + uint64 total_rss_huge = 20 [(gogoproto.customname) = "TotalRSSHuge"]; + uint64 total_mapped_file = 21; + uint64 total_dirty = 22; + uint64 total_writeback = 23; + uint64 total_pg_pg_in = 24; + uint64 total_pg_pg_out = 25; + uint64 total_pg_fault = 26; + uint64 total_pg_maj_fault = 27; + uint64 total_inactive_anon = 28; + uint64 total_active_anon = 29; + uint64 total_inactive_file = 30; + uint64 total_active_file = 31; + uint64 total_unevictable = 32; + MemoryEntry usage = 33; + MemoryEntry swap = 34; + MemoryEntry kernel = 35; + MemoryEntry kernel_tcp = 36 [(gogoproto.customname) = "KernelTCP"]; + +} + +message MemoryEntry { + uint64 limit = 1; + uint64 usage = 2; + uint64 max = 3; + uint64 failcnt = 4; +} + +message MemoryOomControl { + uint64 oom_kill_disable = 1; + uint64 under_oom = 2; + uint64 oom_kill = 3; +} + +message BlkIOStat { + repeated BlkIOEntry io_service_bytes_recursive = 1; + repeated BlkIOEntry io_serviced_recursive = 2; + repeated BlkIOEntry io_queued_recursive = 3; + repeated BlkIOEntry io_service_time_recursive = 4; + repeated BlkIOEntry io_wait_time_recursive = 5; + repeated BlkIOEntry io_merged_recursive = 6; + repeated BlkIOEntry io_time_recursive = 7; + repeated BlkIOEntry sectors_recursive = 8; +} + +message BlkIOEntry { + string op = 1; + string device = 2; + uint64 major = 3; + uint64 minor = 4; + uint64 value = 5; +} + +message RdmaStat { + repeated RdmaEntry current = 1; + repeated RdmaEntry limit = 2; +} + +message RdmaEntry { + string device = 1; + uint32 hca_handles = 2; + uint32 hca_objects = 3; +} + +message NetworkStat { + string name = 1; + uint64 rx_bytes = 2; + uint64 rx_packets = 3; + uint64 rx_errors = 4; + uint64 rx_dropped = 5; + uint64 tx_bytes = 6; + uint64 tx_packets = 7; + uint64 tx_errors = 8; + uint64 tx_dropped = 9; +} + +// CgroupStats exports per-cgroup statistics. +message CgroupStats { + // number of tasks sleeping + uint64 nr_sleeping = 1; + // number of tasks running + uint64 nr_running = 2; + // number of tasks in stopped state + uint64 nr_stopped = 3; + // number of tasks in uninterruptible state + uint64 nr_uninterruptible = 4; + // number of tasks waiting on IO + uint64 nr_io_wait = 5; +} diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE new file mode 100644 index 00000000000..584149b6ee2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE new file mode 100644 index 00000000000..8915f02773f --- /dev/null +++ b/vendor/github.com/containerd/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. 
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go
new file mode 100644
index 00000000000..87622559708
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/errdefs/errors.go
@@ -0,0 +1,92 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package errdefs defines the common errors used throughout containerd
+// packages.
+//
+// Use with fmt.Errorf to add context to an error.
+//
+// To detect an error class, use the IsXXX functions to tell whether an error
+// is of a certain type.
+//
+// The functions ToGRPC and FromGRPC can be used to map server-side and
+// client-side errors to the correct types.
+package errdefs
+
+import (
+	"context"
+	"errors"
+)
+
+// Definitions of common error types used throughout containerd. All containerd
+// errors returned by most packages will map into one of these error classes.
+// Packages should return errors of these types when they want to instruct a
+// client to take a particular action.
+//
+// For the most part, we just try to provide local grpc errors. Most conditions
+// map very well to those defined by grpc.
+var (
+	ErrUnknown            = errors.New("unknown") // used internally to represent a missed mapping.
+	ErrInvalidArgument    = errors.New("invalid argument")
+	ErrNotFound           = errors.New("not found")
+	ErrAlreadyExists      = errors.New("already exists")
+	ErrFailedPrecondition = errors.New("failed precondition")
+	ErrUnavailable        = errors.New("unavailable")
+	ErrNotImplemented     = errors.New("not implemented") // represents not supported and unimplemented
+)
+
+// IsInvalidArgument returns true if the error is due to an invalid argument
+func IsInvalidArgument(err error) bool {
+	return errors.Is(err, ErrInvalidArgument)
+}
+
+// IsNotFound returns true if the error is due to a missing object
+func IsNotFound(err error) bool {
+	return errors.Is(err, ErrNotFound)
+}
+
+// IsAlreadyExists returns true if the error is due to an already existing
+// metadata item
+func IsAlreadyExists(err error) bool {
+	return errors.Is(err, ErrAlreadyExists)
+}
+
+// IsFailedPrecondition returns true if an operation could not proceed due to
+// the lack of a particular condition
+func IsFailedPrecondition(err error) bool {
+	return errors.Is(err, ErrFailedPrecondition)
+}
+
+// IsUnavailable returns true if the error is due to a resource being unavailable
+func IsUnavailable(err error) bool {
+	return errors.Is(err, ErrUnavailable)
+}
+
+// IsNotImplemented returns true if the error is due to not being implemented
+func IsNotImplemented(err error) bool {
+	return errors.Is(err, ErrNotImplemented)
+}
+
+// IsCanceled returns true if the error is due to `context.Canceled`.
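+//
+// A brief editorial sketch (not upstream code): these helpers see through
+// wrapping done with fmt.Errorf and %w, via errors.Is:
+//
+//	err := fmt.Errorf("pull image: %w", context.Canceled)
+//	_ = IsCanceled(err) // true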
+func IsCanceled(err error) bool { + return errors.Is(err, context.Canceled) +} + +// IsDeadlineExceeded returns true if the error is due to +// `context.DeadlineExceeded`. +func IsDeadlineExceeded(err error) bool { + return errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go new file mode 100644 index 00000000000..7a9b33e05af --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -0,0 +1,147 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errdefs + +import ( + "context" + "fmt" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ToGRPC will attempt to map the backend containerd error into a grpc error, +// using the original error message as a description. +// +// Further information may be extracted from certain errors depending on their +// type. +// +// If the error is unmapped, the original error will be returned to be handled +// by the regular grpc error handling stack. +func ToGRPC(err error) error { + if err == nil { + return nil + } + + if isGRPCError(err) { + // error has already been mapped to grpc + return err + } + + switch { + case IsInvalidArgument(err): + return status.Errorf(codes.InvalidArgument, err.Error()) + case IsNotFound(err): + return status.Errorf(codes.NotFound, err.Error()) + case IsAlreadyExists(err): + return status.Errorf(codes.AlreadyExists, err.Error()) + case IsFailedPrecondition(err): + return status.Errorf(codes.FailedPrecondition, err.Error()) + case IsUnavailable(err): + return status.Errorf(codes.Unavailable, err.Error()) + case IsNotImplemented(err): + return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) + } + + return err +} + +// ToGRPCf maps the error to grpc error codes, assembling the formatting string +// and combining it with the target error string. 
+//
+// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
+func ToGRPCf(err error, format string, args ...interface{}) error {
+	return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
+}
+
+// FromGRPC returns the underlying error from a grpc service based on the grpc error code
+func FromGRPC(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	var cls error // divide these into error classes, becomes the cause
+
+	switch code(err) {
+	case codes.InvalidArgument:
+		cls = ErrInvalidArgument
+	case codes.AlreadyExists:
+		cls = ErrAlreadyExists
+	case codes.NotFound:
+		cls = ErrNotFound
+	case codes.Unavailable:
+		cls = ErrUnavailable
+	case codes.FailedPrecondition:
+		cls = ErrFailedPrecondition
+	case codes.Unimplemented:
+		cls = ErrNotImplemented
+	case codes.Canceled:
+		cls = context.Canceled
+	case codes.DeadlineExceeded:
+		cls = context.DeadlineExceeded
+	default:
+		cls = ErrUnknown
+	}
+
+	msg := rebaseMessage(cls, err)
+	if msg != "" {
+		err = fmt.Errorf("%s: %w", msg, cls)
+	} else {
+		err = cls
+	}
+
+	return err
+}
+
+// rebaseMessage removes the repeated error class text from the end of an error
+// string. This will happen when taking an error over grpc and then remapping it.
+//
+// Effectively, we just remove the string of cls from the end of err if it
+// appears there.
+func rebaseMessage(cls error, err error) string {
+	desc := errDesc(err)
+	clss := cls.Error()
+	if desc == clss {
+		return ""
+	}
+
+	return strings.TrimSuffix(desc, ": "+clss)
+}
+
+func isGRPCError(err error) bool {
+	_, ok := status.FromError(err)
+	return ok
+}
+
+func code(err error) codes.Code {
+	if s, ok := status.FromError(err); ok {
+		return s.Code()
+	}
+	return codes.Unknown
+}
+
+func errDesc(err error) string {
+	if s, ok := status.FromError(err); ok {
+		return s.Message()
+	}
+	return err.Error()
+}
diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go
new file mode 100644
index 00000000000..0db9562b82b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/log/context.go
@@ -0,0 +1,69 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package log
+
+import (
+	"context"
+
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// G is an alias for GetLogger.
+	//
+	// We may want to define this locally to a package to get package tagged log
+	// messages.
+	G = GetLogger
+
+	// L is an alias for the standard logger.
+	L = logrus.NewEntry(logrus.StandardLogger())
+)
+
+type (
+	loggerKey struct{}
+)
+
+const (
+	// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
+	// ensure the formatted time is always the same number of characters.
+	RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+	// TextFormat represents the text logging format
+	TextFormat = "text"
+
+	// JSONFormat represents the JSON logging format
+	JSONFormat = "json"
+)
+
+// WithLogger returns a new context with the provided logger.
Use in +// combination with logger.WithField(s) for great effect. +func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { + e := logger.WithContext(ctx) + return context.WithValue(ctx, loggerKey{}, e) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +func GetLogger(ctx context.Context) *logrus.Entry { + logger := ctx.Value(loggerKey{}) + + if logger == nil { + return L.WithContext(ctx) + } + + return logger.(*logrus.Entry) +} diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go new file mode 100644 index 00000000000..3913ef66373 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/compare.go @@ -0,0 +1,203 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "strconv" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// MatchComparer is able to match and compare platforms to +// filter and sort platforms. +type MatchComparer interface { + Matcher + + Less(specs.Platform, specs.Platform) bool +} + +// platformVector returns an (ordered) vector of appropriate specs.Platform +// objects to try matching for the given platform object (see platforms.Only). +func platformVector(platform specs.Platform) []specs.Platform { + vector := []specs.Platform{platform} + + switch platform.Architecture { + case "amd64": + if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 { + for amd64Version--; amd64Version >= 1; amd64Version-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(amd64Version), + }) + } + } + vector = append(vector, specs.Platform{ + Architecture: "386", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + }) + case "arm": + if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { + for armVersion--; armVersion >= 5; armVersion-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(armVersion), + }) + } + } + case "arm64": + variant := platform.Variant + if variant == "" { + variant = "v8" + } + vector = append(vector, platformVector(specs.Platform{ + Architecture: "arm", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: variant, + })...) + } + + return vector +} + +// Only returns a match comparer for a single platform +// using default resolution logic for the platform. 
+//
+// For arm/v8, will also match arm/v7, arm/v6 and arm/v5
+// For arm/v7, will also match arm/v6 and arm/v5
+// For arm/v6, will also match arm/v5
+// For amd64, will also match 386
+func Only(platform specs.Platform) MatchComparer {
+	return Ordered(platformVector(Normalize(platform))...)
+}
+
+// OnlyStrict returns a match comparer for a single platform.
+//
+// Unlike Only, OnlyStrict does not match sub platforms.
+// So, "arm/vN" will not match "arm/vM" where M < N,
+// and "amd64" will not also match "386".
+//
+// OnlyStrict matches non-canonical forms.
+// So, "arm64" matches "arm64/v8".
+func OnlyStrict(platform specs.Platform) MatchComparer {
+	return Ordered(Normalize(platform))
+}
+
+// Ordered returns a platform MatchComparer which matches any of the platforms
+// but orders them in the order they are provided.
+func Ordered(platforms ...specs.Platform) MatchComparer {
+	matchers := make([]Matcher, len(platforms))
+	for i := range platforms {
+		matchers[i] = NewMatcher(platforms[i])
+	}
+	return orderedPlatformComparer{
+		matchers: matchers,
+	}
+}
+
+// Any returns a platform MatchComparer which matches any of the platforms
+// with no preference for ordering.
+func Any(platforms ...specs.Platform) MatchComparer {
+	matchers := make([]Matcher, len(platforms))
+	for i := range platforms {
+		matchers[i] = NewMatcher(platforms[i])
+	}
+	return anyPlatformComparer{
+		matchers: matchers,
+	}
+}
+
+// All is a platform MatchComparer which matches all platforms
+// with no preference for ordering.
+var All MatchComparer = allPlatformComparer{}
+
+type orderedPlatformComparer struct {
+	matchers []Matcher
+}
+
+func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
+	for _, m := range c.matchers {
+		if m.Match(platform) {
+			return true
+		}
+	}
+	return false
+}
+
+func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
+	for _, m := range c.matchers {
+		p1m := m.Match(p1)
+		p2m := m.Match(p2)
+		if p1m && !p2m {
+			return true
+		}
+		if p1m || p2m {
+			return false
+		}
+	}
+	return false
+}
+
+type anyPlatformComparer struct {
+	matchers []Matcher
+}
+
+func (c anyPlatformComparer) Match(platform specs.Platform) bool {
+	for _, m := range c.matchers {
+		if m.Match(platform) {
+			return true
+		}
+	}
+	return false
+}
+
+func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
+	var p1m, p2m bool
+	for _, m := range c.matchers {
+		if !p1m && m.Match(p1) {
+			p1m = true
+		}
+		if !p2m && m.Match(p2) {
+			p2m = true
+		}
+		if p1m && p2m {
+			return false
+		}
+	}
+	// If one matches, and the other does not, sort the match first
+	return p1m && !p2m
+}
+
+type allPlatformComparer struct{}
+
+func (allPlatformComparer) Match(specs.Platform) bool {
+	return true
+}
+
+func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
+	return false
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
new file mode 100644
index 00000000000..046e0356d19
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
@@ -0,0 +1,131 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/log"
+)
+
+// cpuVariantValue presents the ARM instruction set architecture, e.g.: v7, v8.
+// Don't use this value directly; call cpuVariant() instead.
+var cpuVariantValue string
+
+var cpuVariantOnce sync.Once
+
+func cpuVariant() string {
+	cpuVariantOnce.Do(func() {
+		if isArmArch(runtime.GOARCH) {
+			cpuVariantValue = getCPUVariant()
+		}
+	})
+	return cpuVariantValue
+}
+
+// For Linux, the kernel has already detected the ABI, ISA and Features.
+// So we don't need to access the ARM registers to detect platform information
+// by ourselves. We can just parse this information from /proc/cpuinfo.
+func getCPUInfo(pattern string) (info string, err error) {
+	if !isLinuxOS(runtime.GOOS) {
+		return "", fmt.Errorf("getCPUInfo for OS %s: %w", runtime.GOOS, errdefs.ErrNotImplemented)
+	}
+
+	cpuinfo, err := os.Open("/proc/cpuinfo")
+	if err != nil {
+		return "", err
+	}
+	defer cpuinfo.Close()
+
+	// Parse the cpuinfo line by line. For an SMP SoC, parsing
+	// the first core is enough.
+	scanner := bufio.NewScanner(cpuinfo)
+	for scanner.Scan() {
+		newline := scanner.Text()
+		list := strings.Split(newline, ":")
+
+		if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
+			return strings.TrimSpace(list[1]), nil
+		}
+	}
+
+	// Check whether the scanner encountered errors
+	err = scanner.Err()
+	if err != nil {
+		return "", err
+	}
+
+	return "", fmt.Errorf("getCPUInfo for pattern: %s: %w", pattern, errdefs.ErrNotFound)
+}
+
+func getCPUVariant() string {
+	if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
+		// Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use
+		// runtime.GOARCH to determine the variants
+		var variant string
+		switch runtime.GOARCH {
+		case "arm64":
+			variant = "v8"
+		case "arm":
+			variant = "v7"
+		default:
+			variant = "unknown"
+		}
+
+		return variant
+	}
+
+	variant, err := getCPUInfo("Cpu architecture")
+	if err != nil {
+		log.L.WithError(err).Error("failure getting variant")
+		return ""
+	}
+
+	// handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7")
+	// https://www.raspberrypi.org/forums/viewtopic.php?t=12614
+	if runtime.GOARCH == "arm" && variant == "7" {
+		model, err := getCPUInfo("model name")
+		if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") {
+			variant = "6"
+		}
+	}
+
+	switch strings.ToLower(variant) {
+	case "8", "aarch64":
+		variant = "v8"
+	case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
+		variant = "v7"
+	case "6", "6tej":
+		variant = "v6"
+	case "5", "5t", "5te", "5tej":
+		variant = "v5"
+	case "4", "4t":
+		variant = "v4"
+	case "3":
+		variant = "v3"
+	default:
+		variant = "unknown"
+	}
+
+	return variant
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go
new file mode 100644
index 00000000000..dbe9957ca9d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/database.go
@@ -0,0 +1,116 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"runtime"
+	"strings"
+)
+
+// isLinuxOS returns true if the operating system is Linux.
+//
+// The OS value should be normalized before calling this function.
+func isLinuxOS(os string) bool {
+	return os == "linux"
+}
+
+// These functions are generated from https://golang.org/src/go/build/syslist.go.
+//
+// We use switch statements because they are slightly faster than map lookups
+// and use a little less memory.
+
+// isKnownOS returns true if we know about the operating system.
+//
+// The OS value should be normalized before calling this function.
+func isKnownOS(os string) bool {
+	switch os {
+	case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
+		return true
+	}
+	return false
+}
+
+// isArmArch returns true if the architecture is ARM.
+//
+// The arch value should be normalized before being passed to this function.
+func isArmArch(arch string) bool {
+	switch arch {
+	case "arm", "arm64":
+		return true
+	}
+	return false
+}
+
+// isKnownArch returns true if we know about the architecture.
+//
+// The arch value should be normalized before being passed to this function.
+func isKnownArch(arch string) bool {
+	switch arch {
+	case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
		return true
+	}
+	return false
+}
+
+func normalizeOS(os string) string {
+	if os == "" {
+		return runtime.GOOS
+	}
+	os = strings.ToLower(os)
+
+	switch os {
+	case "macos":
+		os = "darwin"
+	}
+	return os
+}
+
+// normalizeArch normalizes the architecture.
+func normalizeArch(arch, variant string) (string, string) {
+	arch, variant = strings.ToLower(arch), strings.ToLower(variant)
+	switch arch {
+	case "i386":
+		arch = "386"
+		variant = ""
+	case "x86_64", "x86-64", "amd64":
+		arch = "amd64"
+		if variant == "v1" {
+			variant = ""
+		}
+	case "aarch64", "arm64":
+		arch = "arm64"
+		switch variant {
+		case "8", "v8":
+			variant = ""
+		}
+	case "armhf":
+		arch = "arm"
+		variant = "v7"
+	case "armel":
+		arch = "arm"
+		variant = "v6"
+	case "arm":
+		switch variant {
+		case "", "7":
+			variant = "v7"
+		case "5", "6", "8":
+			variant = "v" + variant
+		}
+	}
+
+	return arch, variant
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go
new file mode 100644
index 00000000000..cfa3ff34a19
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/defaults.go
@@ -0,0 +1,27 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+// DefaultString returns the default string specifier for the platform.
+func DefaultString() string {
+	return Format(DefaultSpec())
+}
+
+// DefaultStrict returns the strict form of Default.
+func DefaultStrict() MatchComparer {
+	return OnlyStrict(DefaultSpec())
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go
new file mode 100644
index 00000000000..e249fe48d38
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go
@@ -0,0 +1,45 @@
+//go:build darwin
+// +build darwin
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"runtime"
+
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// DefaultSpec returns the current platform's default platform specification.
+func DefaultSpec() specs.Platform {
+	return specs.Platform{
+		OS:           runtime.GOOS,
+		Architecture: runtime.GOARCH,
+		// The Variant field will be empty if arch != ARM.
+		Variant: cpuVariant(),
+	}
+}
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+	return Ordered(DefaultSpec(), specs.Platform{
+		// darwin runtime also supports Linux binaries via runu/LKL
+		OS:           "linux",
+		Architecture: runtime.GOARCH,
+	})
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go
new file mode 100644
index 00000000000..49690f1b3e7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go
@@ -0,0 +1,41 @@
+//go:build !windows && !darwin
+// +build !windows,!darwin
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"runtime"
+
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// DefaultSpec returns the current platform's default platform specification.
+func DefaultSpec() specs.Platform {
+	return specs.Platform{
+		OS:           runtime.GOOS,
+		Architecture: runtime.GOARCH,
+		// The Variant field will be empty if arch != ARM.
+		Variant: cpuVariant(),
+	}
+}
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+	return Only(DefaultSpec())
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
new file mode 100644
index 00000000000..c1aaf72ca8e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
@@ -0,0 +1,91 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"fmt"
+	"runtime"
+	"strconv"
+	"strings"
+
+	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/sys/windows"
+)
+
+// DefaultSpec returns the current platform's default platform specification.
+func DefaultSpec() specs.Platform {
+	major, minor, build := windows.RtlGetNtVersionNumbers()
+	return specs.Platform{
+		OS:           runtime.GOOS,
+		Architecture: runtime.GOARCH,
+		OSVersion:    fmt.Sprintf("%d.%d.%d", major, minor, build),
+		// The Variant field will be empty if arch != ARM.
+		Variant: cpuVariant(),
+	}
+}
+
+type matchComparer struct {
+	defaults        Matcher
+	osVersionPrefix string
+}
+
+// Match matches platforms with the same Windows major, minor,
+// and build version.
+func (m matchComparer) Match(p imagespec.Platform) bool {
+	if m.defaults.Match(p) {
+		// TODO(windows): Figure out whether OSVersion is deprecated.
+		return strings.HasPrefix(p.OSVersion, m.osVersionPrefix)
+	}
+	return false
+}
+
+// Less sorts matched platforms in front of other platforms.
+// For matched platforms, it puts platforms with larger revision
+// numbers in front.
+func (m matchComparer) Less(p1, p2 imagespec.Platform) bool {
+	m1, m2 := m.Match(p1), m.Match(p2)
+	if m1 && m2 {
+		r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion)
+		return r1 > r2
+	}
+	return m1 && !m2
+}
+
+func revision(v string) int {
+	parts := strings.Split(v, ".")
+	if len(parts) < 4 {
+		return 0
+	}
+	r, err := strconv.Atoi(parts[3])
+	if err != nil {
+		return 0
+	}
+	return r
+}
+
+// Default returns the default matcher for the platform.
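+//
+// A hedged usage sketch (editorial, not upstream; p is a hypothetical
+// specs.Platform taken from an image index entry):
+//
+//	if platforms.Default().Match(p) {
+//		// p is compatible with this host
+//	}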
+func Default() MatchComparer {
+	major, minor, build := windows.RtlGetNtVersionNumbers()
+	return matchComparer{
+		defaults: Ordered(DefaultSpec(), specs.Platform{
+			OS:           "linux",
+			Architecture: runtime.GOARCH,
+		}),
+		osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build),
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go
new file mode 100644
index 00000000000..8f955d036df
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/platforms.go
@@ -0,0 +1,261 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package platforms provides a toolkit for normalizing, matching and
+// specifying container platforms.
+//
+// Centered around OCI platform specifications, we define a string-based
+// specifier syntax that can be used for user input. With a specifier, users
+// only need to specify the parts of the platform that are relevant to their
+// context, providing an operating system or architecture or both.
+//
+// How do I use this package?
+//
+// The vast majority of use cases should simply use the match function with
+// user input. The first step is to parse a specifier into a matcher:
+//
+//	m, err := Parse("linux")
+//	if err != nil { ... }
+//
+// Once you have a matcher, use it to match against the platform declared by a
+// component, typically from an image or runtime. Since extracting an image's
+// platform is a little more involved, we'll use an example against the
+// platform default:
+//
+//	if ok := m.Match(Default()); !ok { /* doesn't match */ }
+//
+// This can be composed in loops for resolving runtimes or used as a filter
+// when fetching and selecting images.
+//
+// More details of the specifier syntax and platform spec follow.
+//
+// Declaring Platform Support
+//
+// Components that have strict platform requirements should use the OCI
+// platform specification to declare their support. Typically, these will be
+// images and runtimes, which should declare the specific platforms they
+// support. This looks roughly as follows:
+//
+//	type Platform struct {
+//		Architecture string
+//		OS           string
+//		Variant      string
+//	}
+//
+// Most images and runtimes should at least set Architecture and OS, according
+// to their GOARCH and GOOS values, respectively (follow the OCI image
+// specification when in doubt). ARM should set the variant under certain
+// conditions, which are outlined below.
+//
+// Platform Specifiers
+//
+// While the OCI platform specifications provide a tool for components to
+// specify structured information, user input typically doesn't need the full
+// context and much can be inferred. To solve this problem, we introduced
+// "specifiers". A specifier has the format
+// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
+// operating system or the architecture or both.
+//
+// An example of a common specifier is `linux/amd64`. If the host has a default
+// runtime that matches this, the user can simply provide the component
+// that matters. For example, if an image provides amd64 and arm64 support, the
+// operating system `linux` can be inferred, so the user only has to provide
+// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
+// where the architecture may be known but a runtime may support images from
+// different operating systems.
+//
+// Normalization
+//
+// Because not all users are familiar with the way the Go runtime represents
+// platforms, several normalizations have been provided to make this package
+// easier to use.
+//
+// The following are performed for architectures:
+//
+//	Value    Normalized
+//	aarch64  arm64
+//	armhf    arm
+//	armel    arm/v6
+//	i386     386
+//	x86_64   amd64
+//	x86-64   amd64
+//
+// We also normalize the operating system `macos` to `darwin`.
+//
+// ARM Support
+//
+// To qualify the ARM architecture, the Variant field is used to qualify the arm
+// version. The most common arm version, v7, is represented without the variant
+// unless it is explicitly provided. This is treated as equivalent to armhf. A
+// previous architecture, armel, will be normalized to arm/v6.
+//
+// While these normalizations are provided, their support on arm platforms has
+// not yet been fully implemented and tested.
+package platforms
+
+import (
+	"fmt"
+	"path"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"github.com/containerd/containerd/errdefs"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var (
+	specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
+)
+
+// Matcher matches platform specifications, provided by an image or runtime.
+type Matcher interface {
+	Match(platform specs.Platform) bool
+}
+
+// NewMatcher returns a simple matcher based on the provided platform
+// specification. The returned matcher only looks for equality based on os,
+// architecture and variant.
+//
+// One may implement their own matcher if this doesn't provide the required
+// functionality.
+//
+// Applications should opt to use `Match` over directly parsing specifiers.
+func NewMatcher(platform specs.Platform) Matcher {
+	return &matcher{
+		Platform: Normalize(platform),
+	}
+}
+
+type matcher struct {
+	specs.Platform
+}
+
+func (m *matcher) Match(platform specs.Platform) bool {
+	normalized := Normalize(platform)
+	return m.OS == normalized.OS &&
+		m.Architecture == normalized.Architecture &&
+		m.Variant == normalized.Variant
+}
+
+func (m *matcher) String() string {
+	return Format(m.Platform)
+}
+
+// Parse parses the platform specifier syntax into a platform declaration.
+//
+// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
+// The minimum required information for a platform specifier is the operating
+// system or architecture. If there is only a single string (no slashes), the
+// value will be matched against the known set of operating systems, then fall
+// back to the known set of architectures. The missing component will be
+// inferred based on the local environment.
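+//
+// Illustrative example (editorial addition, not upstream):
+//
+//	p, err := platforms.Parse("linux/arm64")
+//	// on success: p.OS == "linux", p.Architecture == "arm64", p.Variant == ""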
+func Parse(specifier string) (specs.Platform, error) {
+	if strings.Contains(specifier, "*") {
+		// TODO(stevvooe): need to work out exact wildcard handling
+		return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errdefs.ErrInvalidArgument)
+	}
+
+	parts := strings.Split(specifier, "/")
+
+	for _, part := range parts {
+		if !specifierRe.MatchString(part) {
+			return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errdefs.ErrInvalidArgument)
+		}
+	}
+
+	var p specs.Platform
+	switch len(parts) {
+	case 1:
+		// in this case, we will test that the value might be an OS, then look
+		// it up. If it is not known, we'll treat it as an architecture. Since
+		// we have very little information about the platform here, we are
+		// going to be a little more strict if we don't know about the argument
+		// value.
+		p.OS = normalizeOS(parts[0])
+		if isKnownOS(p.OS) {
+			// picks a default architecture
+			p.Architecture = runtime.GOARCH
+			if p.Architecture == "arm" && cpuVariant() != "v7" {
+				p.Variant = cpuVariant()
+			}
+
+			return p, nil
+		}
+
+		p.Architecture, p.Variant = normalizeArch(parts[0], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+		if isKnownArch(p.Architecture) {
+			p.OS = runtime.GOOS
+			return p, nil
+		}
+
+		return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errdefs.ErrInvalidArgument)
+	case 2:
+		// In this case, we treat as a regular os/arch pair. We don't care
+		// about whether or not we know of the platform.
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+
+		return p, nil
+	case 3:
+		// we have a fully specified variant, this is rare
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
+		if p.Architecture == "arm64" && p.Variant == "" {
+			p.Variant = "v8"
+		}
+
+		return p, nil
+	}
+
+	return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errdefs.ErrInvalidArgument)
+}
+
+// MustParse is like Parse but panics if the specifier cannot be parsed.
+// Simplifies initialization of global variables.
+func MustParse(specifier string) specs.Platform {
+	p, err := Parse(specifier)
+	if err != nil {
+		panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
+	}
+	return p
+}
+
+// Format returns a string specifier from the provided platform specification.
+func Format(platform specs.Platform) string {
+	if platform.OS == "" {
+		return "unknown"
+	}
+
+	return path.Join(platform.OS, platform.Architecture, platform.Variant)
+}
+
+// Normalize validates and translates the platform to the canonical value.
+//
+// For example, if "Aarch64" is encountered, we change it to "arm64" or if
+// "x86_64" is encountered, it becomes "amd64".
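+//
+// Editorial sketch (not upstream):
+//
+//	p := platforms.Normalize(specs.Platform{OS: "macOS", Architecture: "x86_64"})
+//	// p.OS == "darwin", p.Architecture == "amd64"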
+func Normalize(platform specs.Platform) specs.Platform { + platform.OS = normalizeOS(platform.OS) + platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) + return platform +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go new file mode 100644 index 00000000000..0da3efe4c21 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -0,0 +1,662 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "os" + "path" + "runtime" + "strings" + "sync" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + "github.com/klauspost/compress/zstd" + digest "github.com/opencontainers/go-digest" + "golang.org/x/sync/errgroup" +) + +type options struct { + chunkSize int + compressionLevel int + prioritizedFiles []string + missedPrioritizedFiles *[]string + compression Compression + ctx context.Context +} + +type Option func(o *options) error + +// WithChunkSize option specifies the chunk size of eStargz blob to build. +func WithChunkSize(chunkSize int) Option { + return func(o *options) error { + o.chunkSize = chunkSize + return nil + } +} + +// WithCompressionLevel option specifies the gzip compression level. +// The default is gzip.BestCompression. +// See also: https://godoc.org/compress/gzip#pkg-constants +func WithCompressionLevel(level int) Option { + return func(o *options) error { + o.compressionLevel = level + return nil + } +} + +// WithPrioritizedFiles option specifies the list of prioritized files. +// These files must be complete paths that are absolute or relative to "/" +// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar" +// are treated as "/foo/bar". 
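+//
+// Editorial sketch of passing this option to Build (the file names and the
+// section reader sr are hypothetical):
+//
+//	blob, err := estargz.Build(sr,
+//		estargz.WithPrioritizedFiles([]string{"bin/app", "etc/config.json"}))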
+func WithPrioritizedFiles(files []string) Option {
+	return func(o *options) error {
+		o.prioritizedFiles = files
+		return nil
+	}
+}
+
+// WithAllowPrioritizeNotFound makes Build continue the execution even if some
+// of the prioritized files specified by the WithPrioritizedFiles option aren't
+// found in the input tar. Instead, it records all missed file names in the
+// passed slice.
+func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
+	return func(o *options) error {
+		if missedFiles == nil {
+			return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
+		}
+		o.missedPrioritizedFiles = missedFiles
+		return nil
+	}
+}
+
+// WithCompression specifies the compression algorithm to be used.
+// The default is gzip.
+func WithCompression(compression Compression) Option {
+	return func(o *options) error {
+		o.compression = compression
+		return nil
+	}
+}
+
+// WithContext specifies a context that can be used for clean cancellation.
+func WithContext(ctx context.Context) Option {
+	return func(o *options) error {
+		o.ctx = ctx
+		return nil
+	}
+}
+
+// Blob is an eStargz blob.
+type Blob struct {
+	io.ReadCloser
+	diffID    digest.Digester
+	tocDigest digest.Digest
+}
+
+// DiffID returns the digest of the uncompressed blob.
+// It is only valid to call DiffID after Close.
+func (b *Blob) DiffID() digest.Digest {
+	return b.diffID.Digest()
+}
+
+// TOCDigest returns the digest of the uncompressed TOC JSON.
+func (b *Blob) TOCDigest() digest.Digest {
+	return b.tocDigest
+}
+
+// Build builds an eStargz blob, which is an extended version of stargz, from a blob (gzip, zstd
+// or plain tar) passed through the argument. If prioritized files are listed in
+// the options, these files are grouped as "prioritized" and can be used for runtime optimization
+// (e.g. prefetch). This function builds the blob in parallel, dividing that blob into several
+// (at least runtime.GOMAXPROCS(0)) sub-blobs.
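+//
+// A hedged consumption sketch (editorial; dst is a hypothetical io.Writer).
+// Per DiffID above, the diff ID is only valid after the blob has been fully
+// read and closed:
+//
+//	if _, err := io.Copy(dst, blob); err != nil { /* handle */ }
+//	blob.Close()
+//	diffID, tocDgst := blob.DiffID(), blob.TOCDigest()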
+func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
+	var opts options
+	opts.compressionLevel = gzip.BestCompression // BestCompression by default
+	for _, o := range opt {
+		if err := o(&opts); err != nil {
+			return nil, err
+		}
+	}
+	if opts.compression == nil {
+		opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
+	}
+	layerFiles := newTempFiles()
+	ctx := opts.ctx
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	done := make(chan struct{})
+	defer close(done)
+	go func() {
+		select {
+		case <-done:
+			// nop
+		case <-ctx.Done():
+			layerFiles.CleanupAll()
+		}
+	}()
+	defer func() {
+		if rErr != nil {
+			if err := layerFiles.CleanupAll(); err != nil {
+				rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
+			}
+		}
+		if cErr := ctx.Err(); cErr != nil {
+			rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
+		}
+	}()
+	tarBlob, err := decompressBlob(tarBlob, layerFiles)
+	if err != nil {
+		return nil, err
+	}
+	entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles)
+	if err != nil {
+		return nil, err
+	}
+	tarParts := divideEntries(entries, runtime.GOMAXPROCS(0))
+	writers := make([]*Writer, len(tarParts))
+	payloads := make([]*os.File, len(tarParts))
+	var mu sync.Mutex
+	var eg errgroup.Group
+	for i, parts := range tarParts {
+		i, parts := i, parts
+		// builds verifiable stargz sub-blobs
+		eg.Go(func() error {
+			esgzFile, err := layerFiles.TempFile("", "esgzdata")
+			if err != nil {
+				return err
+			}
+			sw := NewWriterWithCompressor(esgzFile, opts.compression)
+			sw.ChunkSize = opts.chunkSize
+			if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
+				return err
+			}
+			mu.Lock()
+			writers[i] = sw
+			payloads[i] = esgzFile
+			mu.Unlock()
+			return nil
+		})
+	}
+	if err := eg.Wait(); err != nil {
+		rErr = err
+		return nil, err
+	}
+	tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...)
+	if err != nil {
+		rErr = err
+		return nil, err
+	}
+	var rs []io.Reader
+	for _, p := range payloads {
+		fs, err := fileSectionReader(p)
+		if err != nil {
+			return nil, err
+		}
+		rs = append(rs, fs)
+	}
+	diffID := digest.Canonical.Digester()
+	pr, pw := io.Pipe()
+	go func() {
+		r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
+		if err != nil {
+			pw.CloseWithError(err)
+			return
+		}
+		defer r.Close()
+		if _, err := io.Copy(diffID.Hash(), r); err != nil {
+			pw.CloseWithError(err)
+			return
+		}
+		pw.Close()
+	}()
+	return &Blob{
+		ReadCloser: readCloser{
+			Reader:    pr,
+			closeFunc: layerFiles.CleanupAll,
+		},
+		tocDigest: tocDgst,
+		diffID:    diffID,
+	}, nil
+}
+
+// closeWithCombine takes unclosed Writers and closes them. It also returns the
+// TOC that combines all the Writers' entries.
+// The Writers don't write the TOC and footer to the underlying writers, so they
+// can be combined into a single eStargz, and the tocAndFooter returned by this
+// function can be appended at the tail of that combined blob.
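+//
+// Editorial note (derived from the offset rebasing below): the combined blob
+// is laid out as
+//
+//	[sub-blob 0][sub-blob 1]...[sub-blob N][TOC JSON + footer]
+//
+// so each entry's Offset is rebased by the cumulative compressed size (w.cw.n)
+// of the preceding sub-blobs.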
+func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
+	if len(ws) == 0 {
+		return nil, "", fmt.Errorf("at least one writer must be passed")
+	}
+	for _, w := range ws {
+		if w.closed {
+			return nil, "", fmt.Errorf("writer must be unclosed")
+		}
+		defer func(w *Writer) { w.closed = true }(w)
+		if err := w.closeGz(); err != nil {
+			return nil, "", err
+		}
+		if err := w.bw.Flush(); err != nil {
+			return nil, "", err
+		}
+	}
+	var (
+		mtoc          = new(JTOC)
+		currentOffset int64
+	)
+	mtoc.Version = ws[0].toc.Version
+	for _, w := range ws {
+		for _, e := range w.toc.Entries {
+			// Recalculate Offset of non-empty files/chunks
+			if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
+				e.Offset += currentOffset
+			}
+			mtoc.Entries = append(mtoc.Entries, e)
+		}
+		if w.toc.Version > mtoc.Version {
+			mtoc.Version = w.toc.Version
+		}
+		currentOffset += w.cw.n
+	}
+
+	return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
+}
+
+func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
+	buf := new(bytes.Buffer)
+	tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
+	if err != nil {
+		return nil, "", err
+	}
+	return buf, tocDigest, nil
+}
+
+// divideEntries divides the passed entries into at least the number of parts
+// specified by the argument.
+func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
+	var estimatedSize int64
+	for _, e := range entries {
+		estimatedSize += e.header.Size
+	}
+	unitSize := estimatedSize / int64(minPartsNum)
+	var (
+		nextEnd = unitSize
+		offset  int64
+	)
+	set = append(set, []*entry{})
+	for _, e := range entries {
+		set[len(set)-1] = append(set[len(set)-1], e)
+		offset += e.header.Size
+		if offset > nextEnd {
+			set = append(set, []*entry{})
+			nextEnd += unitSize
+		}
+	}
+	return
+}
+
+var errNotFound = errors.New("not found")
+
+// sortEntries reads the specified tar blob and returns a list of tar entries.
+// If prioritized files are specified, the list starts with those files,
+// keeping the order specified by the argument.
+func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
+
+	// Import tar file.
+	intar, err := importTar(in)
+	if err != nil {
+		return nil, fmt.Errorf("failed to sort: %w", err)
+	}
+
+	// Sort the tar file with respect to the prioritized files list.
+	sorted := &tarFile{}
+	for _, l := range prioritized {
+		if err := moveRec(l, intar, sorted); err != nil {
+			if errors.Is(err, errNotFound) && missedPrioritized != nil {
+				*missedPrioritized = append(*missedPrioritized, l)
+				continue // allow not found
+			}
+			return nil, fmt.Errorf("failed to sort tar entries: %w", err)
+		}
+	}
+	if len(prioritized) == 0 {
+		sorted.add(&entry{
+			header: &tar.Header{
+				Name:     NoPrefetchLandmark,
+				Typeflag: tar.TypeReg,
+				Size:     int64(len([]byte{landmarkContents})),
+			},
+			payload: bytes.NewReader([]byte{landmarkContents}),
+		})
+	} else {
+		sorted.add(&entry{
+			header: &tar.Header{
+				Name:     PrefetchLandmark,
+				Typeflag: tar.TypeReg,
+				Size:     int64(len([]byte{landmarkContents})),
+			},
+			payload: bytes.NewReader([]byte{landmarkContents}),
+		})
+	}
+
+	// Dump all entries and concatenate them.
+	return append(sorted.dump(), intar.dump()...), nil
+}
+
+// readerFromEntries returns a reader of a tar archive that contains the
+// entries passed through the arguments.
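Before readerFromEntries: the greedy cut that divideEntries above performs is easier to see on plain sizes. A standalone sketch, under the assumption that it mirrors (rather than reuses) the vendored code:

package main

import "fmt"

// divide greedily cuts sizes into at least minParts groups of roughly equal
// total size, the same strategy divideEntries applies to tar entries.
func divide(sizes []int64, minParts int) [][]int64 {
	var total int64
	for _, s := range sizes {
		total += s
	}
	unit := total / int64(minParts)
	nextEnd, offset := unit, int64(0)
	set := [][]int64{{}}
	for _, s := range sizes {
		set[len(set)-1] = append(set[len(set)-1], s)
		offset += s
		if offset > nextEnd {
			set = append(set, []int64{})
			nextEnd += unit
		}
	}
	return set
}

func main() {
	fmt.Println(divide([]int64{5, 1, 1, 7, 3, 3}, 3)) // [[5 1 1] [7] [3 3] []]
}

Note the possible empty trailing group when the last element crosses a boundary; the vendored code has the same behavior, which is harmless since an empty part simply produces an empty sub-blob.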
+func readerFromEntries(entries ...*entry) io.Reader { + pr, pw := io.Pipe() + go func() { + tw := tar.NewWriter(pw) + defer tw.Close() + for _, entry := range entries { + if err := tw.WriteHeader(entry.header); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err)) + return + } + if _, err := io.Copy(tw, entry.payload); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err)) + return + } + } + pw.Close() + }() + return pr +} + +func importTar(in io.ReaderAt) (*tarFile, error) { + tf := &tarFile{} + pw, err := newCountReader(in) + if err != nil { + return nil, fmt.Errorf("failed to make position watcher: %w", err) + } + tr := tar.NewReader(pw) + + // Walk through all nodes. + for { + // Fetch and parse next header. + h, err := tr.Next() + if err != nil { + if err == io.EOF { + break + } else { + return nil, fmt.Errorf("failed to parse tar file, %w", err) + } + } + switch cleanEntryName(h.Name) { + case PrefetchLandmark, NoPrefetchLandmark: + // Ignore existing landmark + continue + } + + // Add entry. If it already exists, replace it. + if _, ok := tf.get(h.Name); ok { + tf.remove(h.Name) + } + tf.add(&entry{ + header: h, + payload: io.NewSectionReader(in, pw.currentPos(), h.Size), + }) + } + + return tf, nil +} + +func moveRec(name string, in *tarFile, out *tarFile) error { + name = cleanEntryName(name) + if name == "" { // root directory. stop recursion. + if e, ok := in.get(name); ok { + // entry of the root directory exists. we should move it as well. + // this case will occur if tar entries are prefixed with "./", "/", etc. + out.add(e) + in.remove(name) + } + return nil + } + + _, okIn := in.get(name) + _, okOut := out.get(name) + if !okIn && !okOut { + return fmt.Errorf("file: %q: %w", name, errNotFound) + } + + parent, _ := path.Split(strings.TrimSuffix(name, "/")) + if err := moveRec(parent, in, out); err != nil { + return err + } + if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink { + if err := moveRec(e.header.Linkname, in, out); err != nil { + return err + } + } + if e, ok := in.get(name); ok { + out.add(e) + in.remove(name) + } + return nil +} + +type entry struct { + header *tar.Header + payload io.ReadSeeker +} + +type tarFile struct { + index map[string]*entry + stream []*entry +} + +func (f *tarFile) add(e *entry) { + if f.index == nil { + f.index = make(map[string]*entry) + } + f.index[cleanEntryName(e.header.Name)] = e + f.stream = append(f.stream, e) +} + +func (f *tarFile) remove(name string) { + name = cleanEntryName(name) + if f.index != nil { + delete(f.index, name) + } + var filtered []*entry + for _, e := range f.stream { + if cleanEntryName(e.header.Name) == name { + continue + } + filtered = append(filtered, e) + } + f.stream = filtered +} + +func (f *tarFile) get(name string) (e *entry, ok bool) { + if f.index == nil { + return nil, false + } + e, ok = f.index[cleanEntryName(name)] + return +} + +func (f *tarFile) dump() []*entry { + return f.stream +} + +type readCloser struct { + io.Reader + closeFunc func() error +} + +func (rc readCloser) Close() error { + return rc.closeFunc() +} + +func fileSectionReader(file *os.File) (*io.SectionReader, error) { + info, err := file.Stat() + if err != nil { + return nil, err + } + return io.NewSectionReader(file, 0, info.Size()), nil +} + +func newTempFiles() *tempFiles { + return &tempFiles{} +} + +type tempFiles struct { + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once +} + +func (tf *tempFiles) TempFile(dir, pattern string) 
(*os.File, error) { + f, err := os.CreateTemp(dir, pattern) + if err != nil { + return nil, err + } + tf.filesMu.Lock() + tf.files = append(tf.files, f) + tf.filesMu.Unlock() + return f, nil +} + +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { + tf.filesMu.Lock() + defer tf.filesMu.Unlock() + var allErr []error + for _, f := range tf.files { + if err := f.Close(); err != nil { + allErr = append(allErr, err) + } + if err := os.Remove(f.Name()); err != nil { + allErr = append(allErr, err) + } + } + tf.files = nil + return errorutil.Aggregate(allErr) +} + +func newCountReader(r io.ReaderAt) (*countReader, error) { + pos := int64(0) + return &countReader{r: r, cPos: &pos}, nil +} + +type countReader struct { + r io.ReaderAt + cPos *int64 + + mu sync.Mutex +} + +func (cr *countReader) Read(p []byte) (int, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + n, err := cr.r.ReadAt(p, *cr.cPos) + if err == nil { + *cr.cPos += int64(n) + } + return n, err +} + +func (cr *countReader) Seek(offset int64, whence int) (int64, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + switch whence { + default: + return 0, fmt.Errorf("Unknown whence: %v", whence) + case io.SeekStart: + case io.SeekCurrent: + offset += *cr.cPos + case io.SeekEnd: + return 0, fmt.Errorf("Unsupported whence: %v", whence) + } + + if offset < 0 { + return 0, fmt.Errorf("invalid offset") + } + *cr.cPos = offset + return offset, nil +} + +func (cr *countReader) currentPos() int64 { + cr.mu.Lock() + defer cr.mu.Unlock() + + return *cr.cPos +} + +func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) { + if org.Size() < 4 { + return org, nil + } + src := make([]byte, 4) + if _, err := org.Read(src); err != nil && err != io.EOF { + return nil, err + } + var dR io.Reader + if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { + // gzip + dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dgR.Close() + dR = io.Reader(dgR) + } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) { + // zstd + dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dzR.Close() + dR = io.Reader(dzR) + } else { + // uncompressed + return io.NewSectionReader(org, 0, org.Size()), nil + } + b, err := tmp.TempFile("", "uncompresseddata") + if err != nil { + return nil, err + } + if _, err := io.Copy(b, dR); err != nil { + return nil, err + } + return fileSectionReader(b) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go new file mode 100644 index 00000000000..6de78b02dcd --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go @@ -0,0 +1,40 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package errorutil + +import ( + "errors" + "fmt" + "strings" +) + +// Aggregate combines a list of errors into a single new error. +func Aggregate(errs []error) error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + points := make([]string, len(errs)+1) + points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs)) + for i, err := range errs { + points[i+1] = fmt.Sprintf("* %s", err) + } + return errors.New(strings.Join(points, "\n\t")) + } +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go new file mode 100644 index 00000000000..921e59ec6ef --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -0,0 +1,1041 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "errors" + "fmt" + "hash" + "io" + "os" + "path" + "sort" + "strings" + "sync" + "time" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + digest "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/archive/tar" +) + +// A Reader permits random access reads from a stargz file. +type Reader struct { + sr *io.SectionReader + toc *JTOC + tocDigest digest.Digest + + // m stores all non-chunk entries, keyed by name. + m map[string]*TOCEntry + + // chunks stores all TOCEntry values for regular files that + // are split up. For a file with a single chunk, it's only + // stored in m. + chunks map[string][]*TOCEntry + + decompressor Decompressor +} + +type openOpts struct { + tocOffset int64 + decompressors []Decompressor + telemetry *Telemetry +} + +// OpenOption is an option used during opening the layer +type OpenOption func(o *openOpts) error + +// WithTOCOffset option specifies the offset of TOC +func WithTOCOffset(tocOffset int64) OpenOption { + return func(o *openOpts) error { + o.tocOffset = tocOffset + return nil + } +} + +// WithDecompressors option specifies decompressors to use. +// Default is gzip-based decompressor. +func WithDecompressors(decompressors ...Decompressor) OpenOption { + return func(o *openOpts) error { + o.decompressors = decompressors + return nil + } +} + +// WithTelemetry option specifies the telemetry hooks +func WithTelemetry(telemetry *Telemetry) OpenOption { + return func(o *openOpts) error { + o.telemetry = telemetry + return nil + } +} + +// MeasureLatencyHook is a func which takes start time and records the diff +type MeasureLatencyHook func(time.Time) + +// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you should be able to record +// the latency metrics of the respective steps of estargz open operation. To be used with estargz.OpenWithTelemetry(...) 
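Before the Telemetry struct, a quick illustration of what errorutil.Aggregate, added above, produces for each arity (a sketch; the printed shapes follow directly from the code):

package main

import (
	"errors"
	"fmt"

	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
)

func main() {
	fmt.Println(errorutil.Aggregate(nil)) // <nil>
	one := errors.New("gzip: footer parse failed")
	fmt.Println(errorutil.Aggregate([]error{one})) // the single error itself
	// Two or more errors become one indented summary:
	// 2 error(s) occurred:
	//	* gzip: footer parse failed
	//	* zstd: footer parse failed
	fmt.Println(errorutil.Aggregate([]error{one, errors.New("zstd: footer parse failed")}))
}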
+type Telemetry struct { + GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds) + GetTocLatency MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds) + DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds) +} + +// Open opens a stargz file for reading. +// The behavior is configurable using options. +// +// Note that each entry name is normalized as the path that is relative to root. +func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { + var opts openOpts + for _, o := range opt { + if err := o(&opts); err != nil { + return nil, err + } + } + + gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} + decompressors := append(gzipCompressors, opts.decompressors...) + + // Determine the size to fetch. Try to fetch as many bytes as possible. + fetchSize := maxFooterSize(sr.Size(), decompressors...) + if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize { + if maybeTocOffset > sr.Size() { + return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size()) + } + fetchSize = sr.Size() - maybeTocOffset + } + + start := time.Now() // before getting layer footer + footer := make([]byte, fetchSize) + if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil { + return nil, fmt.Errorf("error reading footer: %v", err) + } + if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil { + opts.telemetry.GetFooterLatency(start) + } + + var allErr []error + var found bool + var r *Reader + for _, d := range decompressors { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + maybeTocBytes := footer[:fOffset] + _, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:]) + if err != nil { + allErr = append(allErr, err) + continue + } + if tocSize <= 0 { + tocSize = sr.Size() - tocOffset - fSize + } + if tocSize < int64(len(maybeTocBytes)) { + maybeTocBytes = maybeTocBytes[:tocSize] + } + r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts) + if err == nil { + found = true + break + } + allErr = append(allErr, err) + } + if !found { + return nil, errorutil.Aggregate(allErr) + } + if err := r.initFields(); err != nil { + return nil, fmt.Errorf("failed to initialize fields of entries: %v", err) + } + return r, nil +} + +// OpenFooter extracts and parses footer from the given blob. +// only supports gzip-based eStargz. +func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) { + if sr.Size() < FooterSize && sr.Size() < legacyFooterSize { + return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size()) + } + var footer [FooterSize]byte + if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil { + return 0, 0, fmt.Errorf("error reading footer: %v", err) + } + var allErr []error + for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + _, tocOffset, _, err := d.ParseFooter(footer[fOffset:]) + if err == nil { + return tocOffset, fSize, err + } + allErr = append(allErr, err) + } + return 0, 0, errorutil.Aggregate(allErr) +} + +// initFields populates the Reader from r.toc after decoding it from +// JSON. +// +// Unexported fields are populated and TOCEntry fields that were +// implicit in the JSON are populated. 
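Before initFields, a hedged sketch of opening a layer with the telemetry hooks described above. layer.esgz is a hypothetical blob; the gzip decompressors are the default, so WithDecompressors is omitted:

package main

import (
	"fmt"
	"io"
	"os"
	"time"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	f, err := os.Open("layer.esgz") // hypothetical eStargz blob
	if err != nil {
		panic(err)
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		panic(err)
	}
	tel := &estargz.Telemetry{
		GetFooterLatency:      func(start time.Time) { fmt.Println("footer:", time.Since(start)) },
		GetTocLatency:         func(start time.Time) { fmt.Println("toc fetch:", time.Since(start)) },
		DeserializeTocLatency: func(start time.Time) { fmt.Println("toc decode:", time.Since(start)) },
	}
	r, err := estargz.Open(
		io.NewSectionReader(f, 0, info.Size()),
		estargz.WithTelemetry(tel),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println("TOC digest:", r.TOCDigest())
}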
+func (r *Reader) initFields() error {
+	r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
+	r.chunks = make(map[string][]*TOCEntry)
+	var lastPath string
+	uname := map[int]string{}
+	gname := map[int]string{}
+	var lastRegEnt *TOCEntry
+	for _, ent := range r.toc.Entries {
+		ent.Name = cleanEntryName(ent.Name)
+		if ent.Type == "reg" {
+			lastRegEnt = ent
+		}
+		if ent.Type == "chunk" {
+			ent.Name = lastPath
+			r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+			if ent.ChunkSize == 0 && lastRegEnt != nil {
+				ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
+			}
+		} else {
+			lastPath = ent.Name
+
+			if ent.Uname != "" {
+				uname[ent.UID] = ent.Uname
+			} else {
+				ent.Uname = uname[ent.UID]
+			}
+			if ent.Gname != "" {
+				gname[ent.GID] = ent.Gname
+			} else {
+				ent.Gname = gname[ent.GID]
+			}
+
+			ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
+
+			if ent.Type == "dir" {
+				ent.NumLink++ // Parent dir links to this directory
+			}
+			r.m[ent.Name] = ent
+		}
+		if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
+			r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
+			r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+		}
+		if ent.ChunkSize == 0 && ent.Size != 0 {
+			ent.ChunkSize = ent.Size
+		}
+	}
+
+	// Populate children, add implicit directories:
+	for _, ent := range r.toc.Entries {
+		if ent.Type == "chunk" {
+			continue
+		}
+		// add "foo/":
+		//  add "foo" child to "" (creating "" if necessary)
+		//
+		// add "foo/bar/":
+		//  add "bar" child to "foo" (creating "foo" if necessary)
+		//
+		// add "foo/bar.txt":
+		//  add "bar.txt" child to "foo" (creating "foo" if necessary)
+		//
+		// add "a/b/c/d/e/f.txt":
+		//  create "a/b/c/d/e" node
+		//  add "f.txt" child to "e"
+
+		name := ent.Name
+		pdirName := parentDir(name)
+		if name == pdirName {
+			// This entry and its parent are the same.
+			// Ignore this to avoid an infinite reference loop.
+			// The example case where this can occur is when the tar contains the root
+			// directory itself (e.g. "./", "/").
+			continue
+		}
+		pdir := r.getOrCreateDir(pdirName)
+		ent.NumLink++ // at least one name (ent.Name) references this entry.
+		if ent.Type == "hardlink" {
+			org, err := r.getSource(ent)
+			if err != nil {
+				return err
+			}
+			org.NumLink++ // original entry is referenced by this ent.Name.
+			ent = org
+		}
+		pdir.addChild(path.Base(name), ent)
+	}
+
+	lastOffset := r.sr.Size()
+	for i := len(r.toc.Entries) - 1; i >= 0; i-- {
+		e := r.toc.Entries[i]
+		if e.isDataType() {
+			e.nextOffset = lastOffset
+		}
+		if e.Offset != 0 {
+			lastOffset = e.Offset
+		}
+	}
+
+	return nil
+}
+
+func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) {
+	if ent.Type == "hardlink" {
+		org, ok := r.m[cleanEntryName(ent.LinkName)]
+		if !ok {
+			return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+		}
+		ent, err = r.getSource(org)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ent, nil
+}
+
+func parentDir(p string) string {
+	dir, _ := path.Split(p)
+	return strings.TrimSuffix(dir, "/")
+}
+
+func (r *Reader) getOrCreateDir(d string) *TOCEntry {
+	e, ok := r.m[d]
+	if !ok {
+		e = &TOCEntry{
+			Name:    d,
+			Type:    "dir",
+			Mode:    0755,
+			NumLink: 2, // The directory itself(.) and the parent link to this directory.
+		}
+		r.m[d] = e
+		if d != "" {
+			pdir := r.getOrCreateDir(parentDir(d))
+			pdir.addChild(path.Base(d), e)
+		}
+	}
+	return e
+}
+
+func (r *Reader) TOCDigest() digest.Digest {
+	return r.tocDigest
+}
+
+// VerifyTOC checks that the TOC JSON in the passed blob matches the
+// passed digests and that the TOC JSON contains digests for all chunks
+// contained in the blob. If the verification succeeds, this function
+// returns TOCEntryVerifier which holds all chunk digests in the stargz blob.
+func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
+	// Verify the digest of TOC JSON
+	if r.tocDigest != tocDigest {
+		return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
+	}
+	return r.Verifiers()
+}
+
+// Verifiers returns the TOCEntryVerifier of this blob. Use VerifyTOC instead in most cases
+// because this doesn't verify the TOC.
+func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
+	chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
+	regDigestMap := make(map[int64]digest.Digest)   // map from chunk offset to the reg file digest
+	var chunkDigestMapIncomplete bool
+	var regDigestMapIncomplete bool
+	var containsChunk bool
+	for _, e := range r.toc.Entries {
+		if e.Type != "reg" && e.Type != "chunk" {
+			continue
+		}
+
+		// offset must be unique in stargz blob
+		_, dOK := chunkDigestMap[e.Offset]
+		_, rOK := regDigestMap[e.Offset]
+		if dOK || rOK {
+			return nil, fmt.Errorf("offset %d found twice", e.Offset)
+		}
+
+		if e.Type == "reg" {
+			if e.Size == 0 {
+				continue // ignore empty files
+			}
+
+			// record the digest of the regular file payload
+			if e.Digest != "" {
+				d, err := digest.Parse(e.Digest)
+				if err != nil {
+					return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err)
+				}
+				regDigestMap[e.Offset] = d
+			} else {
+				regDigestMapIncomplete = true
+			}
+		} else {
+			containsChunk = true // this layer contains "chunk" entries.
+		}
+
+		// "reg" also can contain ChunkDigest (e.g. when "reg" is the first entry of
+		// a chunked file)
+		if e.ChunkDigest != "" {
+			d, err := digest.Parse(e.ChunkDigest)
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err)
+			}
+			chunkDigestMap[e.Offset] = d
+		} else {
+			chunkDigestMapIncomplete = true
+		}
+	}
+
+	if chunkDigestMapIncomplete {
+		// Though some chunk digests are not found, if this layer doesn't contain
+		// "chunk"s and all digests of "reg" files are recorded, we can use them instead.
+		if !containsChunk && !regDigestMapIncomplete {
+			return &verifier{digestMap: regDigestMap}, nil
+		}
+		return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON")
+	}
+
+	return &verifier{digestMap: chunkDigestMap}, nil
+}
+
+// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by
+// offset of the chunk.
+type verifier struct {
+	digestMap   map[int64]digest.Digest
+	digestMapMu sync.Mutex
+}
+
+// Verifier returns a content verifier specified by TOCEntry.
+func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) {
+	v.digestMapMu.Lock()
+	defer v.digestMapMu.Unlock()
+	d, ok := v.digestMap[ce.Offset]
+	if !ok {
+		return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered",
+			ce.Offset, ce.ChunkSize)
+	}
+	return d.Verifier(), nil
+}
+
+// ChunkEntryForOffset returns the TOCEntry containing the byte of the
+// named file at the given offset within the file.
+// Name must be an absolute path or one that is relative to root.
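A sketch of how VerifyTOC, Verifier, and ChunkEntryForOffset (documented above, implemented just below) compose into a per-chunk check. verifyChunk and its parameters are illustrative; in practice tocDigest travels out-of-band (for example in image annotations), and data holds the decompressed payload of the chunk being checked:

package main

import (
	"fmt"

	"github.com/containerd/stargz-snapshotter/estargz"
	digest "github.com/opencontainers/go-digest"
)

// verifyChunk checks one chunk of a file against the digests recorded in the TOC.
func verifyChunk(r *estargz.Reader, tocDigest digest.Digest, name string, off int64, data []byte) error {
	v, err := r.VerifyTOC(tocDigest) // fails first if the TOC itself was tampered with
	if err != nil {
		return err
	}
	ce, ok := r.ChunkEntryForOffset(name, off)
	if !ok {
		return fmt.Errorf("no chunk of %q covers offset %d", name, off)
	}
	dv, err := v.Verifier(ce)
	if err != nil {
		return err
	}
	if _, err := dv.Write(data); err != nil {
		return err
	}
	if !dv.Verified() {
		return fmt.Errorf("chunk %q@%d failed verification", name, off)
	}
	return nil
}

func main() {} // wiring omitted; verifyChunk would be called from real open/read paths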
+func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) { + name = cleanEntryName(name) + e, ok = r.Lookup(name) + if !ok || !e.isDataType() { + return nil, false + } + ents := r.chunks[name] + if len(ents) < 2 { + if offset >= e.ChunkSize { + return nil, false + } + return e, true + } + i := sort.Search(len(ents), func(i int) bool { + e := ents[i] + return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize) + }) + if i == len(ents) { + return nil, false + } + return ents[i], true +} + +// Lookup returns the Table of Contents entry for the given path. +// +// To get the root directory, use the empty string. +// Path must be absolute path or one that is relative to root. +func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { + path = cleanEntryName(path) + if r == nil { + return + } + e, ok = r.m[path] + if ok && e.Type == "hardlink" { + var err error + e, err = r.getSource(e) + if err != nil { + return nil, false + } + } + return +} + +// OpenFile returns the reader of the specified file payload. +// +// Name must be absolute path or one that is relative to root. +func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + name = cleanEntryName(name) + ent, ok := r.Lookup(name) + if !ok { + // TODO: come up with some error plan. This is lazy: + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: os.ErrNotExist, + } + } + if ent.Type != "reg" { + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: errors.New("not a regular file"), + } + } + fr := &fileReader{ + r: r, + size: ent.Size, + ents: r.getChunks(ent), + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { + if ents, ok := r.chunks[ent.Name]; ok { + return ents + } + return []*TOCEntry{ent} +} + +type fileReader struct { + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries +} + +func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { + if off >= fr.size { + return 0, io.EOF + } + if off < 0 { + return 0, errors.New("invalid offset") + } + var i int + if len(fr.ents) > 1 { + i = sort.Search(len(fr.ents), func(i int) bool { + return fr.ents[i].ChunkOffset >= off + }) + if i == len(fr.ents) { + i = len(fr.ents) - 1 + } + } + ent := fr.ents[i] + if ent.ChunkOffset > off { + if i == 0 { + return 0, errors.New("internal error; first chunk offset is non-zero") + } + ent = fr.ents[i-1] + } + + // If ent is a chunk of a large file, adjust the ReadAt + // offset by the chunk's offset. + off -= ent.ChunkOffset + + finalEnt := fr.ents[len(fr.ents)-1] + compressedOff := ent.Offset + // compressedBytesRemain is the number of compressed bytes in this + // file remaining, over 1+ chunks. 
+ compressedBytesRemain := finalEnt.NextOffset() - compressedOff + + sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain) + + const maxRead = 2 << 20 + var bufSize = maxRead + if compressedBytesRemain < maxRead { + bufSize = int(compressedBytesRemain) + } + + br := bufio.NewReaderSize(sr, bufSize) + if _, err := br.Peek(bufSize); err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err) + } + + dr, err := fr.r.decompressor.Reader(br) + if err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) + } + defer dr.Close() + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { + return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) + } + return io.ReadFull(dr, p) +} + +// A Writer writes stargz files. +// +// Use NewWriter to create a new Writer. +type Writer struct { + bw *bufio.Writer + cw *countWriter + toc *JTOC + diffHash hash.Hash // SHA-256 of uncompressed tar + + closed bool + gz io.WriteCloser + lastUsername map[int]string + lastGroupname map[int]string + compressor Compressor + + // ChunkSize optionally controls the maximum number of bytes + // of data of a regular file that can be written in one gzip + // stream before a new gzip stream is started. + // Zero means to use a default, currently 4 MiB. + ChunkSize int +} + +// currentCompressionWriter writes to the current w.gz field, which can +// change throughout writing a tar entry. +// +// Additionally, it updates w's SHA-256 of the uncompressed bytes +// of the tar file. +type currentCompressionWriter struct{ w *Writer } + +func (ccw currentCompressionWriter) Write(p []byte) (int, error) { + ccw.w.diffHash.Write(p) + if ccw.w.gz == nil { + if err := ccw.w.condOpenGz(); err != nil { + return 0, err + } + } + return ccw.w.gz.Write(p) +} + +func (w *Writer) chunkSize() int { + if w.ChunkSize <= 0 { + return 4 << 20 + } + return w.ChunkSize +} + +// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob. +// TOC JSON and footer are removed. +func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { + footerSize := c.FooterSize() + if sr.Size() < footerSize { + return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize) + } + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + return nil, err + } + blobPayloadSize, _, _, err := c.ParseFooter(footer) + if err != nil { + return nil, fmt.Errorf("failed to parse footer: %w", err) + } + return c.Reader(io.LimitReader(sr, blobPayloadSize)) +} + +// NewWriter returns a new stargz writer (gzip-based) writing to w. +// +// The writer must be closed to write its trailing table of contents. +func NewWriter(w io.Writer) *Writer { + return NewWriterLevel(w, gzip.BestCompression) +} + +// NewWriterLevel returns a new stargz writer (gzip-based) writing to w. +// The compression level is configurable. +// +// The writer must be closed to write its trailing table of contents. +func NewWriterLevel(w io.Writer, compressionLevel int) *Writer { + return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel)) +} + +// NewWriterWithCompressor returns a new stargz writer writing to w. +// The compression method is configurable. +// +// The writer must be closed to write its trailing table of contents. 
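Before NewWriterWithCompressor below, a minimal end-to-end sketch of the Writer API (NewWriter, AppendTar, Close, DiffID). The in-memory tar and the tiny ChunkSize are illustrative only; error handling on the tar writer is elided for brevity:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	// Build a one-file tar in memory as input (contents are arbitrary).
	src := new(bytes.Buffer)
	tw := tar.NewWriter(src)
	body := []byte("hello eStargz")
	tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(body)), Typeflag: tar.TypeReg})
	tw.Write(body)
	tw.Close()

	out := new(bytes.Buffer)
	w := estargz.NewWriter(out) // gzip.BestCompression by default
	w.ChunkSize = 4             // absurdly small, to force multi-chunk regular files
	if err := w.AppendTar(src); err != nil {
		panic(err)
	}
	tocDigest, err := w.Close() // writes the TOC and the footer
	if err != nil {
		panic(err)
	}
	fmt.Println("tocDigest:", tocDigest, "diffID:", w.DiffID(), "blob bytes:", out.Len())
}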
+func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
+	bw := bufio.NewWriter(w)
+	cw := &countWriter{w: bw}
+	return &Writer{
+		bw:         bw,
+		cw:         cw,
+		toc:        &JTOC{Version: 1},
+		diffHash:   sha256.New(),
+		compressor: c,
+	}
+}
+
+// Close writes the stargz's table of contents and flushes all the
+// buffers, returning any error.
+func (w *Writer) Close() (digest.Digest, error) {
+	if w.closed {
+		return "", nil
+	}
+	defer func() { w.closed = true }()
+
+	if err := w.closeGz(); err != nil {
+		return "", err
+	}
+
+	// Write the TOC index and footer.
+	tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash)
+	if err != nil {
+		return "", err
+	}
+	if err := w.bw.Flush(); err != nil {
+		return "", err
+	}
+
+	return tocDigest, nil
+}
+
+func (w *Writer) closeGz() error {
+	if w.closed {
+		return errors.New("write on closed Writer")
+	}
+	if w.gz != nil {
+		if err := w.gz.Close(); err != nil {
+			return err
+		}
+		w.gz = nil
+	}
+	return nil
+}
+
+// nameIfChanged returns name, unless it was already the value of (*mp)[id],
+// in which case it returns the empty string.
+func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
+	if name == "" {
+		return ""
+	}
+	if *mp == nil {
+		*mp = make(map[int]string)
+	}
+	if (*mp)[id] == name {
+		return ""
+	}
+	(*mp)[id] = name
+	return name
+}
+
+func (w *Writer) condOpenGz() (err error) {
+	if w.gz == nil {
+		w.gz, err = w.compressor.Writer(w.cw)
+	}
+	return
+}
+
+// AppendTar reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+func (w *Writer) AppendTar(r io.Reader) error {
+	return w.appendTar(r, false)
+}
+
+// AppendTarLossLess reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+//
+// The difference between this function and AppendTar is that this one writes
+// the input tar stream into w without any modification (e.g. to header bytes).
+//
+// Note that if the input tar stream already contains TOC JSON, this returns an
+// error because w cannot overwrite the TOC JSON with the one generated by w without
+// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz,
+// you should decompress it and remove the TOC JSON in advance.
+func (w *Writer) AppendTarLossLess(r io.Reader) error {
+	return w.appendTar(r, true)
+}
+
+func (w *Writer) appendTar(r io.Reader, lossless bool) error {
+	var src io.Reader
+	br := bufio.NewReader(r)
+	if isGzip(br) {
+		zr, _ := gzip.NewReader(br)
+		src = zr
+	} else {
+		src = io.Reader(br)
+	}
+	dst := currentCompressionWriter{w}
+	var tw *tar.Writer
+	if !lossless {
+		tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
+	}
+	tr := tar.NewReader(src)
+	if lossless {
+		tr.RawAccounting = true
+	}
+	for {
+		h, err := tr.Next()
+		if err == io.EOF {
+			if lossless {
+				if remain := tr.RawBytes(); len(remain) > 0 {
+					// Collect the remaining null bytes.
+ // https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53 + if _, err := dst.Write(remain); err != nil { + return err + } + } + } + break + } + if err != nil { + return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err) + } + if cleanEntryName(h.Name) == TOCTarName { + // It is possible for a layer to be "stargzified" twice during the + // distribution lifecycle. So we reserve "TOCTarName" here to avoid + // duplicated entries in the resulting layer. + if lossless { + // We cannot handle this in lossless way. + return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append") + } + continue + } + + xattrs := make(map[string][]byte) + const xattrPAXRecordsPrefix = "SCHILY.xattr." + if h.PAXRecords != nil { + for k, v := range h.PAXRecords { + if strings.HasPrefix(k, xattrPAXRecordsPrefix) { + xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v) + } + } + } + ent := &TOCEntry{ + Name: h.Name, + Mode: h.Mode, + UID: h.Uid, + GID: h.Gid, + Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname), + Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname), + ModTime3339: formatModtime(h.ModTime), + Xattrs: xattrs, + } + if err := w.condOpenGz(); err != nil { + return err + } + if tw != nil { + if err := tw.WriteHeader(h); err != nil { + return err + } + } else { + if _, err := dst.Write(tr.RawBytes()); err != nil { + return err + } + } + switch h.Typeflag { + case tar.TypeLink: + ent.Type = "hardlink" + ent.LinkName = h.Linkname + case tar.TypeSymlink: + ent.Type = "symlink" + ent.LinkName = h.Linkname + case tar.TypeDir: + ent.Type = "dir" + case tar.TypeReg: + ent.Type = "reg" + ent.Size = h.Size + case tar.TypeChar: + ent.Type = "char" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeBlock: + ent.Type = "block" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeFifo: + ent.Type = "fifo" + default: + return fmt.Errorf("unsupported input tar entry %q", h.Typeflag) + } + + // We need to keep a reference to the TOC entry for regular files, so that we + // can fill the digest later. 
+ var regFileEntry *TOCEntry + var payloadDigest digest.Digester + if h.Typeflag == tar.TypeReg { + regFileEntry = ent + payloadDigest = digest.Canonical.Digester() + } + + if h.Typeflag == tar.TypeReg && ent.Size > 0 { + var written int64 + totalSize := ent.Size // save it before we destroy ent + tee := io.TeeReader(tr, payloadDigest.Hash()) + for written < totalSize { + if err := w.closeGz(); err != nil { + return err + } + + chunkSize := int64(w.chunkSize()) + remain := totalSize - written + if remain < chunkSize { + chunkSize = remain + } else { + ent.ChunkSize = chunkSize + } + ent.Offset = w.cw.n + ent.ChunkOffset = written + chunkDigest := digest.Canonical.Digester() + + if err := w.condOpenGz(); err != nil { + return err + } + + teeChunk := io.TeeReader(tee, chunkDigest.Hash()) + var out io.Writer + if tw != nil { + out = tw + } else { + out = dst + } + if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil { + return fmt.Errorf("error copying %q: %v", h.Name, err) + } + ent.ChunkDigest = chunkDigest.Digest().String() + w.toc.Entries = append(w.toc.Entries, ent) + written += chunkSize + ent = &TOCEntry{ + Name: h.Name, + Type: "chunk", + } + } + } else { + w.toc.Entries = append(w.toc.Entries, ent) + } + if payloadDigest != nil { + regFileEntry.Digest = payloadDigest.Digest().String() + } + if tw != nil { + if err := tw.Flush(); err != nil { + return err + } + } + } + remainDest := io.Discard + if lossless { + remainDest = dst // Preserve the remaining bytes in lossless mode + } + _, err := io.Copy(remainDest, src) + return err +} + +// DiffID returns the SHA-256 of the uncompressed tar bytes. +// It is only valid to call DiffID after Close. +func (w *Writer) DiffID() string { + return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil)) +} + +func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) { + for _, d := range decompressors { + if s := d.FooterSize(); res < s && s <= blobSize { + res = s + } + } + return +} + +func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) { + if len(tocBytes) > 0 { + start := time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err == nil { + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } + } + + start := time.Now() + tocBytes = make([]byte, tocSize) + if _, err := sr.ReadAt(tocBytes, tocOff); err != nil { + return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err) + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + start = time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil +} + +func formatModtime(t time.Time) string { + if t.IsZero() || t.Unix() == 0 { + return "" + } + return t.UTC().Round(time.Second).Format(time.RFC3339) +} + +func cleanEntryName(name string) string { + // Use path.Clean to consistently deal with path separators across platforms. + return strings.TrimPrefix(path.Clean("/"+name), "/") +} + +// countWriter counts how many bytes have been written to its wrapped +// io.Writer. 
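Before the countWriter type, a small illustration of cleanEntryName, defined above: it is the normalization every lookup and TOC name goes through. The sketch replicates it rather than calling the unexported helper:

package main

import (
	"fmt"
	"path"
	"strings"
)

// clean replicates cleanEntryName: every name becomes a clean,
// slash-separated path relative to root ("" is the root itself).
func clean(name string) string {
	return strings.TrimPrefix(path.Clean("/"+name), "/")
}

func main() {
	for _, n := range []string{"./foo/bar", "/foo/bar", "../foo/bar", "foo//bar/", ".", ""} {
		fmt.Printf("%-12q -> %q\n", n, clean(n))
	}
	// "./foo/bar", "/foo/bar" and "../foo/bar" all collapse to "foo/bar";
	// "." and "" collapse to "", the root entry.
}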
+type countWriter struct { + w io.Writer + n int64 +} + +func (cw *countWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + return +} + +// isGzip reports whether br is positioned right before an upcoming gzip stream. +// It does not consume any bytes from br. +func isGzip(br *bufio.Reader) bool { + const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + ) + peek, _ := br.Peek(3) + return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate +} + +func positive(n int64) int64 { + if n < 0 { + return 0 + } + return n +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go new file mode 100644 index 00000000000..591d7a62e11 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -0,0 +1,237 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/binary" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + + digest "github.com/opencontainers/go-digest" +) + +type gzipCompression struct { + *GzipCompressor + *GzipDecompressor +} + +func newGzipCompressionWithLevel(level int) Compression { + return &gzipCompression{ + &GzipCompressor{level}, + &GzipDecompressor{}, + } +} + +func NewGzipCompressor() *GzipCompressor { + return &GzipCompressor{gzip.BestCompression} +} + +func NewGzipCompressorWithLevel(level int) *GzipCompressor { + return &GzipCompressor{level} +} + +type GzipCompressor struct { + compressionLevel int +} + +func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) { + return gzip.NewWriterLevel(w, gc.compressionLevel) +} + +func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) { + tocJSON, err := json.MarshalIndent(toc, "", "\t") + if err != nil { + return "", err + } + gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel) + gw := io.Writer(gz) + if diffHash != nil { + gw = io.MultiWriter(gz, diffHash) + } + tw := tar.NewWriter(gw) + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: TOCTarName, + Size: int64(len(tocJSON)), + }); err != nil { + return "", err + } + if _, err := tw.Write(tocJSON); err != nil { + return "", err + } + + if err := tw.Close(); err != nil { + return "", err + } + if err := gz.Close(); err != nil { + return "", err + } + if _, err := w.Write(gzipFooterBytes(off)); err != nil { + return "", err + } + return digest.FromBytes(tocJSON), nil +} + +// gzipFooterBytes returns the 51 bytes footer. 
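The footer that gzipFooterBytes below produces can be round-tripped with nothing but the standard library. A sketch under the assumption that it mirrors the format (it does not call the unexported helpers):

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/binary"
	"fmt"
	"strconv"
)

func main() {
	// Encode: an empty NoCompression gzip stream whose Extra field is
	// "SG" + little-endian subfield length + "%016x" TOC offset + "STARGZ".
	const tocOff = int64(0x1234)
	subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
	header := []byte{'S', 'G', 0, 0}
	binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield)))
	buf := new(bytes.Buffer)
	gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression)
	gz.Header.Extra = append(header, subfield...)
	gz.Close()

	// Decode: the reverse, as GzipDecompressor.ParseFooter does.
	zr, err := gzip.NewReader(bytes.NewReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	defer zr.Close()
	extra := zr.Header.Extra
	if len(extra) < 4 || extra[0] != 'S' || extra[1] != 'G' {
		panic("not an eStargz footer")
	}
	got, err := strconv.ParseInt(string(extra[4:20]), 16, 64)
	if err != nil {
		panic(err)
	}
	fmt.Printf("footer is %d bytes, TOC offset = %#x\n", buf.Len(), got) // 51 bytes, 0x1234
}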
+func gzipFooterBytes(tocOff int64) []byte {
+	buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
+	gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
+
+	// Extra header indicating the offset of TOCJSON
+	// https://tools.ietf.org/html/rfc1952#section-2.3.1.1
+	header := make([]byte, 4)
+	header[0], header[1] = 'S', 'G'
+	subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
+	binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
+	gz.Header.Extra = append(header, []byte(subfield)...)
+	gz.Close()
+	if buf.Len() != FooterSize {
+		panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+	}
+	return buf.Bytes()
+}
+
+type GzipDecompressor struct{}
+
+func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != FooterSize {
+		return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, err
+	}
+	defer zr.Close()
+	extra := zr.Header.Extra
+	si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+	if si1 != 'S' || si2 != 'G' {
+		return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2)
+	}
+	if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
+		return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+	}
+	if string(subfield[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+	}
+	tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("failed to parse toc offset: %w", err)
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *GzipDecompressor) FooterSize() int64 {
+	return FooterSize
+}
+
+func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	return decompressTOCEStargz(r)
+}
+
+type LegacyGzipDecompressor struct{}
+
+func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != legacyFooterSize {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
+	}
+	defer zr.Close()
+	extra := zr.Header.Extra
+	if len(extra) != 16+len("STARGZ") {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
+	}
+	if string(extra[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+	}
+	tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *LegacyGzipDecompressor) FooterSize() int64 {
+	return legacyFooterSize
+}
+
+func (gz
*LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) { + return decompressTOCEStargz(r) +} + +func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { + tr, err := decompressTOCEStargz(r) + if err != nil { + return nil, "", err + } + dgstr := digest.Canonical.Digester() + toc = new(JTOC) + if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil { + return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err) + } + if err := tr.Close(); err != nil { + return nil, "", err + } + return toc, dgstr.Digest(), nil +} + +func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) { + zr, err := gzip.NewReader(r) + if err != nil { + return nil, fmt.Errorf("malformed TOC gzip header: %v", err) + } + zr.Multistream(false) + tr := tar.NewReader(zr) + h, err := tr.Next() + if err != nil { + return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err) + } + if h.Name != TOCTarName { + return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName) + } + return readCloser{tr, zr.Close}, nil +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go new file mode 100644 index 00000000000..8f27dfb3ea2 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -0,0 +1,2008 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + "github.com/klauspost/compress/zstd" + digest "github.com/opencontainers/go-digest" +) + +// TestingController is Compression with some helper methods necessary for testing. +type TestingController interface { + Compression + CountStreams(*testing.T, []byte) int + DiffIDOf(*testing.T, []byte) string + String() string +} + +// CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. +func CompressionTestSuite(t *testing.T, controllers ...TestingController) { + t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) }) + t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) }) + t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) 
}) +} + +const ( + uncompressedType int = iota + gzipType + zstdType +) + +var srcCompressions = []int{ + uncompressedType, + gzipType, + zstdType, +} + +var allowedPrefix = [4]string{"", "./", "/", "../"} + +// testBuild tests the resulting stargz blob built by this pkg has the same +// contents as the normal stargz blob. +func testBuild(t *testing.T, controllers ...TestingController) { + tests := []struct { + name string + chunkSize int + in []tarEntry + }{ + { + name: "regfiles and directories", + chunkSize: 4, + in: tarOf( + file("foo", "test1"), + dir("foo2/"), + file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})), + ), + }, + { + name: "empty files", + chunkSize: 4, + in: tarOf( + file("foo", "tttttt"), + file("foo_empty", ""), + file("foo2", "tttttt"), + file("foo_empty2", ""), + file("foo3", "tttttt"), + file("foo_empty3", ""), + file("foo4", "tttttt"), + file("foo_empty4", ""), + file("foo5", "tttttt"), + file("foo_empty5", ""), + file("foo6", "tttttt"), + ), + }, + { + name: "various files", + chunkSize: 4, + in: tarOf( + file("baz.txt", "bazbazbazbazbazbazbaz"), + file("foo.txt", "a"), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + { + name: "no contents", + chunkSize: 4, + in: tarOf( + file("baz.txt", ""), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + } + for _, tt := range tests { + for _, srcCompression := range srcCompressions { + srcCompression := srcCompression + for _, cl := range controllers { + cl := cl + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + for _, prefix := range allowedPrefix { + prefix := prefix + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) { + tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) + // Test divideEntries() + entries, err := sortEntries(tarBlob, nil, nil) // identical order + if err != nil { + t.Fatalf("failed to parse tar: %v", err) + } + var merged []*entry + for _, part := range divideEntries(entries, 4) { + merged = append(merged, part...) 
+ } + if !reflect.DeepEqual(entries, merged) { + for _, e := range entries { + t.Logf("Original: %v", e.header) + } + for _, e := range merged { + t.Logf("Merged: %v", e.header) + } + t.Errorf("divided entries couldn't be merged") + return + } + + // Prepare sample data + wantBuf := new(bytes.Buffer) + sw := NewWriterWithCompressor(wantBuf, cl) + sw.ChunkSize = tt.chunkSize + if err := sw.AppendTar(tarBlob); err != nil { + t.Fatalf("failed to append tar to want stargz: %v", err) + } + if _, err := sw.Close(); err != nil { + t.Fatalf("failed to prepare want stargz: %v", err) + } + wantData := wantBuf.Bytes() + want, err := Open(io.NewSectionReader( + bytes.NewReader(wantData), 0, int64(len(wantData))), + WithDecompressors(cl), + ) + if err != nil { + t.Fatalf("failed to parse the want stargz: %v", err) + } + + // Prepare testing data + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + WithChunkSize(tt.chunkSize), WithCompression(cl)) + if err != nil { + t.Fatalf("failed to build stargz: %v", err) + } + defer rc.Close() + gotBuf := new(bytes.Buffer) + if _, err := io.Copy(gotBuf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + gotData := gotBuf.Bytes() + got, err := Open(io.NewSectionReader( + bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), + WithDecompressors(cl), + ) + if err != nil { + t.Fatalf("failed to parse the got stargz: %v", err) + } + + // Check DiffID is properly calculated + rc.Close() + diffID := rc.DiffID() + wantDiffID := cl.DiffIDOf(t, gotData) + if diffID.String() != wantDiffID { + t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) + } + + // Compare as stargz + if !isSameVersion(t, cl, wantData, gotData) { + t.Errorf("built stargz hasn't same json") + return + } + if !isSameEntries(t, want, got) { + t.Errorf("built stargz isn't same as the original") + return + } + + // Compare as tar.gz + if !isSameTarGz(t, cl, wantData, gotData) { + t.Errorf("built stargz isn't same tar.gz") + return + } + }) + } + } + } + } + } +} + +func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { + aGz, err := controller.Reader(bytes.NewReader(a)) + if err != nil { + t.Fatalf("failed to read A") + } + defer aGz.Close() + bGz, err := controller.Reader(bytes.NewReader(b)) + if err != nil { + t.Fatalf("failed to read B") + } + defer bGz.Close() + + // Same as tar's Next() method but ignores landmarks and TOCJSON file + next := func(r *tar.Reader) (h *tar.Header, err error) { + for { + if h, err = r.Next(); err != nil { + return + } + if h.Name != PrefetchLandmark && + h.Name != NoPrefetchLandmark && + h.Name != TOCTarName { + return + } + } + } + + aTar := tar.NewReader(aGz) + bTar := tar.NewReader(bGz) + for { + // Fetch and parse next header. 
+ aH, aErr := next(aTar) + bH, bErr := next(bTar) + if aErr != nil || bErr != nil { + if aErr == io.EOF && bErr == io.EOF { + break + } + t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr) + } + if !reflect.DeepEqual(aH, bH) { + t.Logf("different header (A = %v; B = %v)", aH, bH) + return false + + } + aFile, err := io.ReadAll(aTar) + if err != nil { + t.Fatal("failed to read tar payload of A") + } + bFile, err := io.ReadAll(bTar) + if err != nil { + t.Fatal("failed to read tar payload of B") + } + if !bytes.Equal(aFile, bFile) { + t.Logf("different tar payload (A = %q; B = %q)", string(a), string(b)) + return false + } + } + + return true +} + +func isSameVersion(t *testing.T, controller TestingController, a, b []byte) bool { + aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), controller) + if err != nil { + t.Fatalf("failed to parse A: %v", err) + } + bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), controller) + if err != nil { + t.Fatalf("failed to parse B: %v", err) + } + t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC)) + t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC)) + return aJTOC.Version == bJTOC.Version +} + +func isSameEntries(t *testing.T, a, b *Reader) bool { + aroot, ok := a.Lookup("") + if !ok { + t.Fatalf("failed to get root of A") + } + broot, ok := b.Lookup("") + if !ok { + t.Fatalf("failed to get root of B") + } + aEntry := stargzEntry{aroot, a} + bEntry := stargzEntry{broot, b} + return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry) +} + +func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader { + buf := new(bytes.Buffer) + var w io.WriteCloser + var err error + if srcCompression == gzipType { + w = gzip.NewWriter(buf) + } else if srcCompression == zstdType { + w, err = zstd.NewWriter(buf) + if err != nil { + t.Fatalf("failed to init zstd writer: %v", err) + } + } else { + return src + } + src.Seek(0, io.SeekStart) + if _, err := io.Copy(w, src); err != nil { + t.Fatalf("failed to compress source") + } + if err := w.Close(); err != nil { + t.Fatalf("failed to finalize compress source") + } + data := buf.Bytes() + return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data))) + +} + +type stargzEntry struct { + e *TOCEntry + r *Reader +} + +// contains checks if all child entries in "b" are also contained in "a". +// This function also checks if the files/chunks contain the same contents among "a" and "b". +func contains(t *testing.T, a, b stargzEntry) bool { + ae, ar := a.e, a.r + be, br := b.e, b.r + t.Logf("Comparing: %q vs %q", ae.Name, be.Name) + if !equalEntry(ae, be) { + t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be) + return false + } + if ae.Type == "dir" { + t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name, + allChildrenName(ae), allChildrenName(be)) + iscontain := true + ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool { + // Walk through all files on this stargz file. + + if aChild.Name == PrefetchLandmark || + aChild.Name == NoPrefetchLandmark { + return true // Ignore landmarks + } + + // Ignore a TOCEntry of "./" (formated as "" by stargz lib) on root directory + // because this points to the root directory itself. 
+ if aChild.Name == "" && ae.Name == "" {
+ return true
+ }
+
+ bChild, ok := be.LookupChild(aBaseName)
+ if !ok {
+ t.Logf("%q (base: %q): not found in b: %v",
+ ae.Name, aBaseName, allChildrenName(be))
+ iscontain = false
+ return false
+ }
+
+ childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r})
+ if !childcontain {
+ t.Logf("%q != %q: non-equal dir", ae.Name, be.Name)
+ iscontain = false
+ return false
+ }
+ return true
+ })
+ return iscontain
+ } else if ae.Type == "reg" {
+ af, err := ar.OpenFile(ae.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on A: %v", ae.Name, err)
+ }
+ bf, err := br.OpenFile(be.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on B: %v", be.Name, err)
+ }
+
+ var nr int64
+ for nr < ae.Size {
+ abytes, anext, aok := readOffset(t, af, nr, a)
+ bbytes, bnext, bok := readOffset(t, bf, nr, b)
+ if !aok && !bok {
+ break
+ } else if !(aok && bok) || anext != bnext {
+ t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
+ ae.Name, be.Name, nr, aok, bok, anext, bnext)
+ return false
+ }
+ nr = anext
+ if !bytes.Equal(abytes, bbytes) {
+ t.Logf("%q != %q: different contents %v vs %v",
+ ae.Name, be.Name, string(abytes), string(bbytes))
+ return false
+ }
+ }
+ return true
+ }
+
+ return true
+}
+
+func allChildrenName(e *TOCEntry) (children []string) {
+ e.ForeachChild(func(baseName string, _ *TOCEntry) bool {
+ children = append(children, baseName)
+ return true
+ })
+ return
+}
+
+func equalEntry(a, b *TOCEntry) bool {
+ // Here, we selectively compare fields that we are interested in.
+ return a.Name == b.Name &&
+ a.Type == b.Type &&
+ a.Size == b.Size &&
+ a.ModTime3339 == b.ModTime3339 &&
+ a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time
+ a.LinkName == b.LinkName &&
+ a.Mode == b.Mode &&
+ a.UID == b.UID &&
+ a.GID == b.GID &&
+ a.Uname == b.Uname &&
+ a.Gname == b.Gname &&
+ (a.Offset > 0) == (b.Offset > 0) &&
+ (a.NextOffset() > 0) == (b.NextOffset() > 0) &&
+ a.DevMajor == b.DevMajor &&
+ a.DevMinor == b.DevMinor &&
+ a.NumLink == b.NumLink &&
+ reflect.DeepEqual(a.Xattrs, b.Xattrs) &&
+ // chunk-related information isn't compared in this function.
+ // ChunkOffset int64 `json:"chunkOffset,omitempty"` + // ChunkSize int64 `json:"chunkSize,omitempty"` + // children map[string]*TOCEntry + a.Digest == b.Digest +} + +func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { + ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) + if !ok { + return nil, 0, false + } + data := make([]byte, ce.ChunkSize) + t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset()) + n, err := r.ReadAt(data, ce.ChunkOffset) + if err != nil { + t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v", + e.e.Name, ce.ChunkOffset, ce.ChunkSize, err) + } + if int64(n) != ce.ChunkSize { + t.Fatalf("unexpected copied data size %d; want %d", + n, ce.ChunkSize) + } + return data[:n], offset + ce.ChunkSize, true +} + +func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { + jtocData, err := json.Marshal(*tocJSON) + if err != nil { + t.Fatalf("failed to marshal TOC JSON: %v", err) + } + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil { + t.Fatalf("failed to read toc json blob: %v", err) + } + return buf.String() +} + +const chunkSize = 3 + +// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) +type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) + +// testDigestAndVerify runs specified checks against sample stargz blobs. +func testDigestAndVerify(t *testing.T, controllers ...TestingController) { + tests := []struct { + name string + tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + checks []check + }{ + { + name: "no-regfile", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + dir("test/"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + dir("test2/"), // modified + ), allowedPrefix[0])), + }, + }, + { + name: "small-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "bbb", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", ""), + file("foo.txt", "M"), // modified + dir("test/"), + file("test/bar.txt", "bbb"), + ), allowedPrefix[0])), + // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO + checkVerifyBrokenContentFail("foo.txt"), + }, + }, + { + name: "big-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified + file("foo.txt", "a"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + { + name: "with-non-regfiles", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", 
"bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + symlink("barlink", "test/bar.txt"), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazbazbazbazbaz"), + file("foo.txt", "a"), + symlink("barlink", "test/bar.txt"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + dir("test2/"), + link("test2/bazlink", "foo.txt"), // modified + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + } + + for _, tt := range tests { + for _, srcCompression := range srcCompressions { + srcCompression := srcCompression + for _, cl := range controllers { + cl := cl + for _, prefix := range allowedPrefix { + prefix := prefix + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) { + // Get original tar file and chunk digests + dgstMap := make(map[string]digest.Digest) + tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) + + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + WithChunkSize(chunkSize), WithCompression(cl)) + if err != nil { + t.Fatalf("failed to convert stargz: %v", err) + } + tocDigest := rc.TOCDigest() + defer rc.Close() + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + newStargz := buf.Bytes() + // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour. + dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) + + for _, check := range tt.checks { + check(t, newStargz, tocDigest, dgstMap, cl) + } + }) + } + } + } + } + } +} + +// checkStargzTOC checks the TOC JSON of the passed stargz has the expected +// digest and contains valid chunks. It walks all entries in the stargz and +// checks all chunk digests stored to the TOC JSON match the actual contents. +func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), + WithDecompressors(controller), + ) + if err != nil { + t.Errorf("failed to parse converted stargz: %v", err) + return + } + digestMapTOC, err := listDigests(io.NewSectionReader( + bytes.NewReader(sgzData), 0, int64(len(sgzData))), + controller, + ) + if err != nil { + t.Fatalf("failed to list digest: %v", err) + } + found := make(map[string]bool) + for id := range dgstMap { + found[id] = false + } + zr, err := controller.Reader(bytes.NewReader(sgzData)) + if err != nil { + t.Fatalf("failed to decompress converted stargz: %v", err) + } + defer zr.Close() + tr := tar.NewReader(zr) + for { + h, err := tr.Next() + if err != nil { + if err != io.EOF { + t.Errorf("failed to read tar entry: %v", err) + return + } + break + } + if h.Name == TOCTarName { + // Check the digest of TOC JSON based on the actual contents + // It's sure that TOC JSON exists in this archive because + // Open succeeded. 
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.Copy(dgstr.Hash(), tr); err != nil {
+ t.Fatalf("failed to calculate digest of TOC JSON: %v",
+ err)
+ }
+ if dgstr.Digest() != tocDigest {
+ t.Errorf("invalid TOC JSON %q; want %q", dgstr.Digest(), tocDigest)
+ }
+ continue
+ }
+ if _, ok := sgz.Lookup(h.Name); !ok {
+ t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+ return
+ }
+ var n int64
+ for n < h.Size {
+ ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+ if !ok {
+ t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+ h.Name, n)
+ return
+ }
+
+ // Get the original digest to make sure the file contents are kept unchanged
+ // from the original tar throughout the whole conversion.
+ id := chunkID(h.Name, n, ce.ChunkSize)
+ want, ok := dgstMap[id]
+ if !ok {
+ t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v",
+ h.Name, n, ce.ChunkSize, dgstMap)
+ return
+ }
+ found[id] = true
+
+ // Check the file contents
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil {
+ t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)",
+ h.Name, n, ce.ChunkSize)
+ }
+ if want != dgstr.Digest() {
+ t.Errorf("Invalid contents in converted stargz %q: %q; want %q",
+ h.Name, dgstr.Digest(), want)
+ return
+ }
+
+ // Check the digest stored in TOC JSON
+ dgstTOC, ok := digestMapTOC[ce.Offset]
+ if !ok {
+ t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered",
+ h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset)
+ }
+ if want != dgstTOC {
+ t.Errorf("Invalid digest in TOCEntry %q: %q; want %q",
+ h.Name, dgstTOC, want)
+ return
+ }
+
+ n += ce.ChunkSize
+ }
+ }
+
+ for id, ok := range found {
+ if !ok {
+ t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+ }
+ }
+}
+
+// checkVerifyTOC checks that verification works for the TOC JSON of the passed
+// stargz. It walks all entries in the stargz and checks that verification
+// works for all chunks.
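+//
+// A rough sketch of the flow exercised below (names from this package; error
+// handling elided; chunkEntry and chunkPayload stand for one TOC chunk entry
+// and a reader over its payload):
+//
+//	sgz, _ := Open(io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller))
+//	ev, _ := sgz.VerifyTOC(tocDigest) // returns a TOCEntryVerifier
+//	v, _ := ev.Verifier(chunkEntry)   // verifier for one chunk
+//	io.CopyN(v, chunkPayload, chunkEntry.ChunkSize)
+//	ok := v.Verified() // true only if the payload matches the digest in the TOC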
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Errorf("failed to parse converted stargz: %v", err)
+ return
+ }
+ ev, err := sgz.VerifyTOC(tocDigest)
+ if err != nil {
+ t.Errorf("failed to verify stargz: %v", err)
+ return
+ }
+
+ found := make(map[string]bool)
+ for id := range dgstMap {
+ found[id] = false
+ }
+ zr, err := controller.Reader(bytes.NewReader(sgzData))
+ if err != nil {
+ t.Fatalf("failed to decompress converted stargz: %v", err)
+ }
+ defer zr.Close()
+ tr := tar.NewReader(zr)
+ for {
+ h, err := tr.Next()
+ if err != nil {
+ if err != io.EOF {
+ t.Errorf("failed to read tar entry: %v", err)
+ return
+ }
+ break
+ }
+ if h.Name == TOCTarName {
+ continue
+ }
+ if _, ok := sgz.Lookup(h.Name); !ok {
+ t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+ return
+ }
+ var n int64
+ for n < h.Size {
+ ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+ if !ok {
+ t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+ h.Name, n)
+ return
+ }
+
+ v, err := ev.Verifier(ce)
+ if err != nil {
+ t.Errorf("failed to get verifier for %q(offset=%d)", h.Name, n)
+ }
+
+ found[chunkID(h.Name, n, ce.ChunkSize)] = true
+
+ // Check the file contents
+ if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil {
+ t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+ h.Name, n, ce.ChunkSize)
+ }
+ if !v.Verified() {
+ t.Errorf("Invalid contents in converted stargz %q (verification should succeed)",
+ h.Name)
+ return
+ }
+ n += ce.ChunkSize
+ }
+ }
+
+ for id, ok := range found {
+ if !ok {
+ t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+ }
+ }
+}
+
+// checkVerifyInvalidTOCEntryFail checks that a misconfigured TOC JSON is
+// detected during verification and that verification returns an error.
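+//
+// Each breakage is applied through a rewriteFunc passed to rewriteTOCJSON
+// (both defined below). A minimal sketch of one such rewrite, clearing a
+// chunk digest so that VerifyTOC must fail (e stands for the matched entry):
+//
+//	func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+//		e.ChunkDigest = "" // verification has nothing to check against
+//	}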
+func checkVerifyInvalidTOCEntryFail(filename string) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+ funcs := map[string]rewriteFunc{
+ "lost digest in an entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ var found bool
+ for _, e := range toc.Entries {
+ if cleanEntryName(e.Name) == filename {
+ if e.Type != "reg" && e.Type != "chunk" {
+ t.Fatalf("entry %q to break must be a regfile or chunk", filename)
+ }
+ if e.ChunkDigest == "" {
+ t.Fatalf("entry %q is already invalid", filename)
+ }
+ e.ChunkDigest = ""
+ found = true
+ }
+ }
+ if !found {
+ t.Fatalf("rewrite target not found")
+ }
+ },
+ "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ var (
+ sampleEntry *TOCEntry
+ targetEntry *TOCEntry
+ )
+ for _, e := range toc.Entries {
+ if e.Type == "reg" || e.Type == "chunk" {
+ if cleanEntryName(e.Name) == filename {
+ targetEntry = e
+ } else {
+ sampleEntry = e
+ }
+ }
+ }
+ if sampleEntry == nil {
+ t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
+ }
+ if targetEntry == nil {
+ t.Fatalf("rewrite target not found")
+ }
+ targetEntry.Offset = sampleEntry.Offset
+ },
+ }
+
+ for name, rFunc := range funcs {
+ t.Run(name, func(t *testing.T) {
+ newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, newSgz); err != nil {
+ t.Fatalf("failed to get converted stargz")
+ }
+ isgz := buf.Bytes()
+
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ _, err = sgz.VerifyTOC(newTocDigest)
+ if err == nil {
+ t.Errorf("must fail for invalid TOC")
+ return
+ }
+ })
+ }
+ }
+}
+
+// checkVerifyInvalidStargzFail checks that the verification detects that the
+// given stargz file doesn't match the expected digest and returns an error.
+func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+ rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(controller))
+ if err != nil {
+ t.Fatalf("failed to convert stargz: %v", err)
+ }
+ defer rc.Close()
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ mStargz := buf.Bytes()
+
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ _, err = sgz.VerifyTOC(tocDigest)
+ if err == nil {
+ t.Errorf("must fail for invalid TOC")
+ return
+ }
+ }
+}
+
+// checkVerifyBrokenContentFail checks that the verifier detects broken contents
+// that don't match the expected digest and returns an error.
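+//
+// A minimal sketch of the failure path exercised below, assuming a chunk
+// entry ce and its payload data have already been loaded:
+//
+//	v, _ := ev.Verifier(ce)
+//	broken := append([]byte{^data[0]}, data[1:]...) // flip the bits of the first byte
+//	io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize)
+//	// v.Verified() must now return false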
+func checkVerifyBrokenContentFail(filename string) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+ // Parse stargz file
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ ev, err := sgz.VerifyTOC(tocDigest)
+ if err != nil {
+ t.Fatalf("failed to verify stargz: %v", err)
+ return
+ }
+
+ // Open the target file
+ sr, err := sgz.OpenFile(filename)
+ if err != nil {
+ t.Fatalf("failed to open file %q", filename)
+ }
+ ce, ok := sgz.ChunkEntryForOffset(filename, 0)
+ if !ok {
+ t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0)
+ return
+ }
+ if ce.ChunkSize == 0 {
+ t.Fatalf("file mustn't be empty")
+ return
+ }
+ data := make([]byte, ce.ChunkSize)
+ if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil {
+ t.Errorf("failed to get data of a chunk of %q(offset=%d)",
+ filename, ce.ChunkOffset)
+ }
+
+ // Check the broken chunk (must fail)
+ v, err := ev.Verifier(ce)
+ if err != nil {
+ t.Fatalf("failed to get verifier for %q", filename)
+ }
+ broken := append([]byte{^data[0]}, data[1:]...)
+ if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil {
+ t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+ filename, ce.ChunkOffset, ce.ChunkSize)
+ }
+ if v.Verified() {
+ t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)",
+ filename, data, broken)
+ }
+ }
+}
+
+func chunkID(name string, offset, size int64) string {
+ return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
+}
+
+type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
+
+func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
+ decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
+ if err != nil {
+ t.Fatalf("failed to extract TOC JSON: %v", err)
+ }
+
+ rewrite(t, decodedJTOC, sgz)
+
+ tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset)
+ if err != nil {
+ t.Fatalf("failed to create toc and footer: %v", err)
+ }
+
+ // Reconstruct stargz file with the modified TOC JSON
+ if _, err := sgz.Seek(0, io.SeekStart); err != nil {
+ t.Fatalf("failed to reset the seek position of stargz: %v", err)
+ }
+ return io.MultiReader(
+ io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON)
+ tocFooter, // Rewritten TOC and footer
+ ), tocDigest
+}
+
+func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) {
+ decodedJTOC, _, err := parseStargz(sgz, controller)
+ if err != nil {
+ return nil, err
+ }
+ digestMap := make(map[int64]digest.Digest)
+ for _, e := range decodedJTOC.Entries {
+ if e.Type == "reg" || e.Type == "chunk" {
+ if e.Type == "reg" && e.Size == 0 {
+ continue // ignores empty file
+ }
+ if e.ChunkDigest == "" {
+ return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
+ e.Name, e.Offset)
+ }
+ d, err := digest.Parse(e.ChunkDigest)
+ if err != nil {
+ return nil, err
+ }
+ digestMap[e.Offset] = d
+ }
+ }
+ return digestMap, nil
+}
+
+func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) {
+ fSize := controller.FooterSize()
+ footer := make([]byte, fSize)
+ if _, err := sgz.ReadAt(footer,
sgz.Size()-fSize); err != nil { + return nil, 0, fmt.Errorf("error reading footer: %w", err) + } + _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):]) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse footer: %w", err) + } + + // Decode the TOC JSON + tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + decodedJTOC, _, err = controller.ParseTOC(tocReader) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) + } + return decodedJTOC, tocOffset, nil +} + +func testWriteAndOpen(t *testing.T, controllers ...TestingController) { + const content = "Some contents" + invalidUtf8 := "\xff\xfe\xfd" + + xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} + sampleOwner := owner{uid: 50, gid: 100} + + tests := []struct { + name string + chunkSize int + in []tarEntry + want []stargzCheck + wantNumGz int // expected number of streams + + wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz + wantFailOnLossLess bool + }{ + { + name: "empty", + in: tarOf(), + wantNumGz: 2, // empty tar + TOC + footer + wantNumGzLossLess: 3, // empty tar + TOC + footer + want: checks( + numTOCEntries(0), + ), + }, + { + name: "1dir_1empty_file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", ""), + ), + wantNumGz: 3, // dir, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", 0), + entryHasChildren("foo", "bar.txt"), + hasFileDigest("foo/bar.txt", digestFor("")), + ), + }, + { + name: "1dir_1file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", content, xAttrFile), + ), + wantNumGz: 4, // var dir, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + hasFileDigest("foo/bar.txt", digestFor(content)), + hasFileContentsRange("foo/bar.txt", 0, content), + hasFileContentsRange("foo/bar.txt", 1, content[1:]), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar.txt"), + hasFileXattrs("foo/bar.txt", "foo", "bar"), + hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8), + ), + }, + { + name: "2meta_2file", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(3), + hasDir("bar/"), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + entryHasChildren("", "bar", "foo"), + entryHasChildren("foo", "bar.txt"), + hasChunkEntries("foo/bar.txt", 1), + hasEntryOwner("bar/", sampleOwner), + hasEntryOwner("foo/", sampleOwner), + hasEntryOwner("foo/bar.txt", sampleOwner), + ), + }, + { + name: "3dir", + in: tarOf( + dir("bar/"), + dir("foo/"), + dir("foo/bar/"), + ), + wantNumGz: 3, // 3 dirs, TOC, footer + want: checks( + hasDirLinkCount("bar/", 2), + hasDirLinkCount("foo/", 3), + hasDirLinkCount("foo/bar/", 2), + ), + }, + { + name: "symlink", + in: tarOf( + dir("foo/"), + symlink("foo/bar", "../../x"), + ), + wantNumGz: 3, // metas + TOC + footer + want: checks( + numTOCEntries(2), + hasSymlink("foo/bar", "../../x"), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar"), + ), + }, + { + name: "chunked_file", + chunkSize: 4, + in: tarOf( + dir("foo/"), + file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), + ), + wantNumGz: 9, + want: checks( + numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file + hasDir("foo/"), + hasFileLen("foo/big.txt", len("This is 
such a big file")), + hasFileDigest("foo/big.txt", digestFor("This is such a big file")), + hasFileContentsRange("foo/big.txt", 0, "This is such a big file"), + hasFileContentsRange("foo/big.txt", 1, "his is such a big file"), + hasFileContentsRange("foo/big.txt", 2, "is is such a big file"), + hasFileContentsRange("foo/big.txt", 3, "s is such a big file"), + hasFileContentsRange("foo/big.txt", 4, " is such a big file"), + hasFileContentsRange("foo/big.txt", 5, "is such a big file"), + hasFileContentsRange("foo/big.txt", 6, "s such a big file"), + hasFileContentsRange("foo/big.txt", 7, " such a big file"), + hasFileContentsRange("foo/big.txt", 8, "such a big file"), + hasFileContentsRange("foo/big.txt", 9, "uch a big file"), + hasFileContentsRange("foo/big.txt", 10, "ch a big file"), + hasFileContentsRange("foo/big.txt", 11, "h a big file"), + hasFileContentsRange("foo/big.txt", 12, " a big file"), + hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""), + hasChunkEntries("foo/big.txt", 6), + ), + }, + { + name: "recursive", + in: tarOf( + dir("/", sampleOwner), + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // dirs, bar.txt alone, TOC, footer + want: checks( + maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt" + ), + }, + { + name: "block_char_fifo", + in: tarOf( + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "b", + Typeflag: tar.TypeBlock, + Devmajor: 123, + Devminor: 456, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "c", + Typeflag: tar.TypeChar, + Devmajor: 111, + Devminor: 222, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "f", + Typeflag: tar.TypeFifo, + Format: format, + }) + }), + ), + wantNumGz: 3, + want: checks( + lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}), + lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}), + lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}), + ), + }, + { + name: "modes", + in: tarOf( + dir("foo1/", 0755|os.ModeDir|os.ModeSetgid), + file("foo1/bar1", content, 0700|os.ModeSetuid), + file("foo1/bar2", content, 0755|os.ModeSetgid), + dir("foo2/", 0755|os.ModeDir|os.ModeSticky), + file("foo2/bar3", content, 0755|os.ModeSticky), + dir("foo3/", 0755|os.ModeDir), + file("foo3/bar4", content, os.FileMode(0700)), + file("foo3/bar5", content, os.FileMode(0755)), + ), + wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer + want: checks( + hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid), + hasMode("foo1/bar1", 0700|os.ModeSetuid), + hasMode("foo1/bar2", 0755|os.ModeSetgid), + hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky), + hasMode("foo2/bar3", 0755|os.ModeSticky), + hasMode("foo3/", 0755|os.ModeDir), + hasMode("foo3/bar4", os.FileMode(0700)), + hasMode("foo3/bar5", os.FileMode(0755)), + ), + }, + { + name: "lossy", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + file(TOCTarName, "dummy"), // ignored by the writer. 
(lossless write returns error)
+ ),
+ wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(3),
+ hasDir("bar/"),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ entryHasChildren("", "bar", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasChunkEntries("foo/bar.txt", 1),
+ hasEntryOwner("bar/", sampleOwner),
+ hasEntryOwner("foo/", sampleOwner),
+ hasEntryOwner("foo/bar.txt", sampleOwner),
+ ),
+ wantFailOnLossLess: true,
+ },
+ }
+
+ for _, tt := range tests {
+ for _, cl := range controllers {
+ cl := cl
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, lossless := range []bool{true, false} {
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) {
+ var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
+ origTarDgstr := digest.Canonical.Digester()
+ tr = io.TeeReader(tr, origTarDgstr.Hash())
+ var stargzBuf bytes.Buffer
+ w := NewWriterWithCompressor(&stargzBuf, cl)
+ w.ChunkSize = tt.chunkSize
+ if lossless {
+ err := w.AppendTarLossLess(tr)
+ if tt.wantFailOnLossLess {
+ if err != nil {
+ return // expected to fail
+ }
+ t.Fatalf("Append(lossless): expected to fail but succeeded")
+ }
+ if err != nil {
+ t.Fatalf("Append(lossless): %v", err)
+ }
+ } else {
+ if err := w.AppendTar(tr); err != nil {
+ t.Fatalf("Append: %v", err)
+ }
+ }
+ if _, err := w.Close(); err != nil {
+ t.Fatalf("Writer.Close: %v", err)
+ }
+ b := stargzBuf.Bytes()
+
+ if lossless {
+ // Check if the result blob preserves the original tar metadata
+ rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl)
+ if err != nil {
+ t.Errorf("failed to decompress blob: %v", err)
+ return
+ }
+ defer rc.Close()
+ resultDgstr := digest.Canonical.Digester()
+ if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil {
+ t.Errorf("failed to read result decompressed blob: %v", err)
+ return
+ }
+ if resultDgstr.Digest() != origTarDgstr.Digest() {
+ t.Errorf("lossy compression occurred: digest=%v; want %v",
+ resultDgstr.Digest(), origTarDgstr.Digest())
+ return
+ }
+ }
+
+ diffID := w.DiffID()
+ wantDiffID := cl.DiffIDOf(t, b)
+ if diffID != wantDiffID {
+ t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+ }
+
+ got := cl.CountStreams(t, b)
+ wantNumGz := tt.wantNumGz
+ if lossless && tt.wantNumGzLossLess > 0 {
+ wantNumGz = tt.wantNumGzLossLess
+ }
+ if got != wantNumGz {
+ t.Errorf("number of streams = %d; want %d", got, wantNumGz)
+ }
+
+ telemetry, checkCalled := newCalledTelemetry()
+ r, err := Open(
+ io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
+ WithDecompressors(cl),
+ WithTelemetry(telemetry),
+ )
+ if err != nil {
+ t.Fatalf("stargz.Open: %v", err)
+ }
+ if err := checkCalled(); err != nil {
+ t.Errorf("telemetry failure: %v", err)
+ }
+ for _, want := range tt.want {
+ want.check(t, r)
+ }
+ })
+ }
+ }
+ }
+ }
+}
+
+func newCalledTelemetry() (telemetry *Telemetry, check func() error) {
+ var getFooterLatencyCalled bool
+ var getTocLatencyCalled bool
+ var deserializeTocLatencyCalled bool
+ return &Telemetry{
+ func(time.Time) { getFooterLatencyCalled = true },
+ func(time.Time) { getTocLatencyCalled = true },
+ func(time.Time) { deserializeTocLatencyCalled = true },
+ }, func() error {
+ var allErr []error
+ if !getFooterLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called"))
+ } + if !getTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + } + if !deserializeTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) + } + return errorutil.Aggregate(allErr) + } +} + +func digestFor(content string) string { + sum := sha256.Sum256([]byte(content)) + return fmt.Sprintf("sha256:%x", sum) +} + +type numTOCEntries int + +func (n numTOCEntries) check(t *testing.T, r *Reader) { + if r.toc == nil { + t.Fatal("nil TOC") + } + if got, want := len(r.toc.Entries), int(n); got != want { + t.Errorf("got %d TOC entries; want %d", got, want) + } + t.Logf("got TOC entries:") + for i, ent := range r.toc.Entries { + entj, _ := json.Marshal(ent) + t.Logf(" [%d]: %s\n", i, entj) + } + if t.Failed() { + t.FailNow() + } +} + +func checks(s ...stargzCheck) []stargzCheck { return s } + +type stargzCheck interface { + check(t *testing.T, r *Reader) +} + +type stargzCheckFn func(*testing.T, *Reader) + +func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) } + +func maxDepth(max int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup("") + if !ok { + t.Fatal("root directory not found") + } + d, err := getMaxDepth(t, e, 0, 10*max) + if err != nil { + t.Errorf("failed to get max depth (wanted %d): %v", max, err) + return + } + if d != max { + t.Errorf("invalid depth %d; want %d", d, max) + return + } + }) +} + +func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) { + if current > limit { + return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", + current, limit) + } + max = current + e.ForeachChild(func(baseName string, ent *TOCEntry) bool { + t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name) + d, err := getMaxDepth(t, ent, current+1, limit) + if err != nil { + rErr = err + return false + } + if d > max { + max = d + } + return true + }) + return +} + +func hasFileLen(file string, wantLen int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } else if ent.Size != int64(wantLen) { + t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen) + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileXattrs(file, name, value string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } + if ent.Xattrs == nil { + t.Errorf("file %q has no xattrs", file) + return + } + valueFound, found := ent.Xattrs[name] + if !found { + t.Errorf("file %q has no xattr %q", file, name) + return + } + if string(valueFound) != value { + t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value) + } + + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileDigest(file string, digest string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("didn't find TOCEntry for file %q", file) + } + if ent.Digest != digest { + t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest) + } + }) +} + +func hasFileContentsRange(file string, offset int, want string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) 
{ + f, err := r.OpenFile(file) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, got, want) + } + }) +} + +func hasChunkEntries(file string, wantChunks int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("no file for %q", file) + } + if ent.Type != "reg" { + t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type) + } + chunks := r.getChunks(ent) + if len(chunks) != wantChunks { + t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks) + return + } + f := chunks[0] + + var gotChunks []*TOCEntry + var last *TOCEntry + for off := int64(0); off < f.Size; off++ { + e, ok := r.ChunkEntryForOffset(file, off) + if !ok { + t.Errorf("no ChunkEntryForOffset at %d", off) + return + } + if last != e { + gotChunks = append(gotChunks, e) + last = e + } + } + if !reflect.DeepEqual(chunks, gotChunks) { + t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks) + } + + // And verify the NextOffset + for i := 0; i < len(gotChunks)-1; i++ { + ci := gotChunks[i] + cnext := gotChunks[i+1] + if ci.NextOffset() != cnext.Offset { + t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset) + } + } + }) +} + +func entryHasChildren(dir string, want ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + want := append([]string(nil), want...) + var got []string + ent, ok := r.Lookup(dir) + if !ok { + t.Fatalf("didn't find TOCEntry for dir node %q", dir) + } + for baseName := range ent.children { + got = append(got, baseName) + } + sort.Strings(got) + sort.Strings(want) + if !reflect.DeepEqual(got, want) { + t.Errorf("children of %q = %q; want %q", dir, got, want) + } + }) +} + +func hasDir(file string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasDirLinkCount(file string, count int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + return + } + if ent.NumLink != count { + t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasMode(file string, mode os.FileMode) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Stat().Mode() != mode { + t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode) + return + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasSymlink(file, target string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "symlink" { + t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type) + } else if ent.LinkName != target { + t.Errorf("link target of symlink %q is 
%q; want %q", file, ent.LinkName, target) + } + return + } + } + t.Errorf("symlink %q not found", file) + }) +} + +func lookupMatch(name string, want *TOCEntry) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup(name) + if !ok { + t.Fatalf("failed to Lookup entry %q", name) + } + if !reflect.DeepEqual(e, want) { + t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want) + } + + }) +} + +func hasEntryOwner(entry string, owner owner) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(strings.TrimSuffix(entry, "/")) + if !ok { + t.Errorf("entry %q not found", entry) + return + } + if ent.UID != owner.uid || ent.GID != owner.gid { + t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid) + return + } + }) +} + +func tarOf(s ...tarEntry) []tarEntry { return s } + +type tarEntry interface { + appendTar(tw *tar.Writer, prefix string, format tar.Format) error +} + +type tarEntryFunc func(*tar.Writer, string, tar.Format) error + +func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error { + return f(tw, prefix, format) +} + +func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { + format := tar.FormatUnknown + for _, opt := range opts { + switch v := opt.(type) { + case tar.Format: + format = v + default: + panic(fmt.Errorf("unsupported opt for buildTar: %v", opt)) + } + } + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, ent := range ents { + if err := ent.appendTar(tw, prefix, format); err != nil { + t.Fatalf("building input tar: %v", err) + } + } + if err := tw.Close(); err != nil { + t.Errorf("closing write of input tar: %v", err) + } + data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to see lossless works + return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data))) +} + +func dir(name string, opts ...interface{}) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + var o owner + mode := os.FileMode(0755) + for _, opt := range opts { + switch v := opt.(type) { + case owner: + o = v + case os.FileMode: + mode = v + default: + return errors.New("unsupported opt") + } + } + if !strings.HasSuffix(name, "/") { + panic(fmt.Sprintf("missing trailing slash in dir %q ", name)) + } + tm, err := fileModeToTarMode(mode) + if err != nil { + return err + } + return tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeDir, + Name: prefix + name, + Mode: tm, + Uid: o.uid, + Gid: o.gid, + Format: format, + }) + }) +} + +// xAttr are extended attributes to set on test files created with the file func. +type xAttr map[string]string + +// owner is owner ot set on test files and directories with the file and dir functions. 
+type owner struct { + uid int + gid int +} + +func file(name, contents string, opts ...interface{}) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + var xattrs xAttr + var o owner + mode := os.FileMode(0644) + for _, opt := range opts { + switch v := opt.(type) { + case xAttr: + xattrs = v + case owner: + o = v + case os.FileMode: + mode = v + default: + return errors.New("unsupported opt") + } + } + if strings.HasSuffix(name, "/") { + return fmt.Errorf("bogus trailing slash in file %q", name) + } + tm, err := fileModeToTarMode(mode) + if err != nil { + return err + } + if len(xattrs) > 0 { + format = tar.FormatPAX // only PAX supports xattrs + } + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Mode: tm, + Xattrs: xattrs, + Size: int64(len(contents)), + Uid: o.uid, + Gid: o.gid, + Format: format, + }); err != nil { + return err + } + _, err = io.WriteString(tw, contents) + return err + }) +} + +func symlink(name, target string) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + return tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeSymlink, + Name: prefix + name, + Linkname: target, + Mode: 0644, + Format: format, + }) + }) +} + +func link(name string, linkname string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeLink, + Name: prefix + name, + Linkname: linkname, + ModTime: now, + Format: format, + }) + }) +} + +func chardev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeChar, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} + +func blockdev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeBlock, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} +func fifo(name string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeFifo, + Name: prefix + name, + ModTime: now, + Format: format, + }) + }) +} + +func prefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: PrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func noPrefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: NoPrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func regDigest(t *testing.T, name string, contentStr 
string, digestMap map[string]digest.Digest) tarEntry { + if digestMap == nil { + t.Fatalf("digest map mustn't be nil") + } + content := []byte(contentStr) + + var n int64 + for n < int64(len(content)) { + size := int64(chunkSize) + remain := int64(len(content)) - n + if remain < size { + size = remain + } + dgstr := digest.Canonical.Digester() + if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil { + t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)", + string(content[n:n+size]), name, n, size) + } + digestMap[chunkID(name, n, size)] = dgstr.Digest() + n += size + } + + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Size: int64(len(content)), + Format: format, + }); err != nil { + return err + } + if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil { + return err + } + return nil + }) +} + +func fileModeToTarMode(mode os.FileMode) (int64, error) { + h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") + if err != nil { + return 0, err + } + return h.Mode, nil +} + +// fileInfoOnlyMode is os.FileMode that populates only file mode. +type fileInfoOnlyMode os.FileMode + +func (f fileInfoOnlyMode) Name() string { return "" } +func (f fileInfoOnlyMode) Size() int64 { return 0 } +func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } +func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } +func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } +func (f fileInfoOnlyMode) Sys() interface{} { return nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go new file mode 100644 index 00000000000..384ff7fd7f2 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -0,0 +1,316 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "hash" + "io" + "os" + "path" + "time" + + digest "github.com/opencontainers/go-digest" +) + +const ( + // TOCTarName is the name of the JSON file in the tar archive in the + // table of contents gzip stream. + TOCTarName = "stargz.index.json" + + // FooterSize is the number of bytes in the footer + // + // The footer is an empty gzip stream with no compression and an Extra + // header of the form "%016xSTARGZ", where the 64 bit hex-encoded + // number is the offset to the gzip stream of JSON TOC. 
+ //
+ // 51 comes from:
+ //
+ // 10 bytes gzip header
+ // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
+ // 2 bytes Extra: SI1 = 'S', SI2 = 'G'
+ // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
+ // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
+ // 5 bytes flate header
+ // 8 bytes gzip footer
+ // (End of the eStargz blob)
+ //
+ // NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' are used for eStargz.
+ FooterSize = 51
+
+ // legacyFooterSize is the number of bytes in the legacy stargz footer.
+ //
+ // 47 comes from:
+ //
+ // 10 byte gzip header +
+ // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" +
+ // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset))
+ // 5 byte flate header
+ // 8 byte gzip footer (two little endian uint32s: digest, size)
+ legacyFooterSize = 47
+
+ // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the
+ // digest of the TOC JSON.
+ // This annotation is valid only when it is specified in `.[]layers.annotations`
+ // of an image manifest.
+ TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
+
+ // StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy
+ // pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size
+ // to the runtime, but the current OCI image spec doesn't ship this information by default, so we
+ // store it in this special annotation.
+ StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size"
+
+ // PrefetchLandmark is a file entry which indicates the end position of
+ // prefetch in the stargz file.
+ PrefetchLandmark = ".prefetch.landmark"
+
+ // NoPrefetchLandmark is a file entry which indicates that no prefetch should
+ // occur in the stargz file.
+ NoPrefetchLandmark = ".no.prefetch.landmark"
+
+ landmarkContents = 0xf
+)
+
+// JTOC is the JSON-serialized table of contents index of the files in the stargz file.
+type JTOC struct {
+ Version int `json:"version"`
+ Entries []*TOCEntry `json:"entries"`
+}
+
+// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
+type TOCEntry struct {
+ // Name is the tar entry's name. It is the complete path
+ // stored in the tar file, not just the base name.
+ Name string `json:"name"`
+
+ // Type is one of "dir", "reg", "symlink", "hardlink", "char",
+ // "block", "fifo", or "chunk".
+ // The "chunk" type is used for regular file data chunks past the first
+ // TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
+ // ChunkOffset, and ChunkSize populated.
+ Type string `json:"type"`
+
+ // Size, for regular files, is the logical size of the file.
+ Size int64 `json:"size,omitempty"`
+
+ // ModTime3339 is the modification time of the tar entry. Empty
+ // means zero or unknown. Otherwise it's in UTC RFC3339
+ // format. Use the ModTime method to access the time.Time value.
+ ModTime3339 string `json:"modtime,omitempty"`
+ modTime time.Time
+
+ // LinkName, for symlinks and hardlinks, is the link target.
+ LinkName string `json:"linkName,omitempty"`
+
+ // Mode is the permission and mode bits.
+ Mode int64 `json:"mode,omitempty"`
+
+ // UID is the user ID of the owner.
+ UID int `json:"uid,omitempty"`
+
+ // GID is the group ID of the owner.
+ GID int `json:"gid,omitempty"`
+
+ // Uname is the username of the owner.
+ //
+ // In the serialized JSON, this field may only be present for
+ // the first entry with the same UID.
+ Uname string `json:"userName,omitempty"`
+
+ // Gname is the group name of the owner.
+ //
+ // In the serialized JSON, this field may only be present for
+ // the first entry with the same GID.
+ Gname string `json:"groupName,omitempty"`
+
+ // Offset, for regular files, provides the offset in the
+ // stargz file to the file's data bytes. See ChunkOffset and
+ // ChunkSize.
+ Offset int64 `json:"offset,omitempty"`
+
+ nextOffset int64 // the Offset of the next entry with a non-zero Offset
+
+ // DevMajor is the major device number for "char" and "block" types.
+ DevMajor int `json:"devMajor,omitempty"`
+
+ // DevMinor is the minor device number for "char" and "block" types.
+ DevMinor int `json:"devMinor,omitempty"`
+
+ // NumLink is the number of entry names pointing to this entry.
+ // Zero means one name references this entry.
+ NumLink int
+
+ // Xattrs are the extended attributes for the entry.
+ Xattrs map[string][]byte `json:"xattrs,omitempty"`
+
+ // Digest stores the OCI checksum for regular file payloads.
+ // It has the form "sha256:abcdef01234....".
+ Digest string `json:"digest,omitempty"`
+
+ // ChunkOffset is non-zero if this is a chunk of a large,
+ // regular file. If so, the Offset is where the gzip header of
+ // ChunkSize bytes at ChunkOffset in Name begins.
+ //
+ // In serialized form, a "chunkSize" JSON field of zero means
+ // that the chunk goes to the end of the file. After reading
+ // from the stargz TOC, though, the ChunkSize is initialized
+ // to a non-zero value when Type is either "reg" or
+ // "chunk".
+ ChunkOffset int64 `json:"chunkOffset,omitempty"`
+ ChunkSize int64 `json:"chunkSize,omitempty"`
+
+ // ChunkDigest stores an OCI digest of the chunk. This must be formed
+ // as "sha256:0123abcd...".
+ ChunkDigest string `json:"chunkDigest,omitempty"`
+
+ children map[string]*TOCEntry
+}
+
+// ModTime returns the entry's modification time.
+func (e *TOCEntry) ModTime() time.Time { return e.modTime }
+
+// NextOffset returns the position (relative to the start of the
+// stargz file) of the next gzip boundary after e.Offset.
+func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
+
+func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
+ if e.children == nil {
+ e.children = make(map[string]*TOCEntry)
+ }
+ if child.Type == "dir" {
+ e.NumLink++ // Entry ".." in the subdirectory links to this directory
+ }
+ e.children[baseName] = child
+}
+
+// isDataType reports whether TOCEntry is a regular file or chunk (something that
+// contains regular file data).
+func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
+
+// Stat returns a FileInfo value representing e.
+func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
+
+// ForeachChild calls f for each child item. If f returns false, iteration ends.
+// If e is not a directory, f is not called.
+func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
+ for name, ent := range e.children {
+ if !f(name, ent) {
+ return
+ }
+ }
+}
+
+// LookupChild returns the directory e's child by its base name.
+func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
+ child, ok = e.children[baseName]
+ return
+}
+
+// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
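+//
+// For example, a TOCEntry can be inspected like a file system object
+// (a usage sketch; entry stands for any *TOCEntry):
+//
+//	fi := entry.Stat()
+//	name, size, isDir := fi.Name(), fi.Size(), fi.IsDir()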
+type fileInfo struct{ e *TOCEntry }
+
+var _ os.FileInfo = fileInfo{}
+
+func (fi fileInfo) Name() string { return path.Base(fi.e.Name) }
+func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" }
+func (fi fileInfo) Size() int64 { return fi.e.Size }
+func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
+func (fi fileInfo) Sys() interface{} { return fi.e }
+func (fi fileInfo) Mode() (m os.FileMode) {
+ // TOCEntry.Mode is tar.Header.Mode so we can understand these bits using the `tar` pkg.
+ m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
+ (os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
+ switch fi.e.Type {
+ case "dir":
+ m |= os.ModeDir
+ case "symlink":
+ m |= os.ModeSymlink
+ case "char":
+ m |= os.ModeDevice | os.ModeCharDevice
+ case "block":
+ m |= os.ModeDevice
+ case "fifo":
+ m |= os.ModeNamedPipe
+ }
+ return m
+}
+
+// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
+// in an eStargz blob.
+type TOCEntryVerifier interface {
+
+ // Verifier provides a content verifier that can be used for verifying the
+ // contents of the specified TOCEntry.
+ Verifier(ce *TOCEntry) (digest.Verifier, error)
+}
+
+// Compression provides the compression helper to be used when creating and parsing eStargz.
+// This package provides gzip-based Compression by default, but any compression
+// algorithm (e.g. zstd) can be used as long as it implements Compression.
+type Compression interface {
+ Compressor
+ Decompressor
+}
+
+// Compressor represents the helper methods to be used for creating eStargz.
+type Compressor interface {
+ // Writer returns a WriteCloser to be used for writing a chunk to eStargz.
+ // Every time a chunk is written, the WriteCloser is closed and Writer is
+ // called again for writing the next chunk.
+ Writer(w io.Writer) (io.WriteCloser, error)
+
+ // WriteTOCAndFooter is called to write JTOC to the passed Writer.
+ // diffHash calculates the DiffID (uncompressed sha256 hash) of the blob.
+ // WriteTOCAndFooter can optionally write anything that affects DiffID calculation
+ // (e.g. uncompressed TOC JSON).
+ //
+ // This function returns tocDgst that represents the digest of TOC that will be used
+ // to verify this blob when it's parsed.
+ WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
+}
+
+// Decompressor represents the helper methods to be used for parsing eStargz.
+type Decompressor interface {
+ // Reader returns a ReadCloser to be used for decompressing file payload.
+ Reader(r io.Reader) (io.ReadCloser, error)
+
+ // FooterSize returns the size of the footer of this blob.
+ FooterSize() int64
+
+ // ParseFooter parses the footer and returns the offset and (compressed) size of TOC.
+ // blobPayloadSize is the (compressed) size of the blob payload (i.e. the size from
+ // the top of the blob until the TOC JSON).
+ //
+ // Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
+ // from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
+ ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
+
+ // ParseTOC parses TOC from the passed reader. The reader provides the partial contents
+ // of the underlying blob that has the range specified by the ParseFooter method.
+ //
+ // This function returns tocDgst that represents the digest of TOC that will be used
+ // to verify this blob.
+// Decompressor represents the helper methods to be used for parsing eStargz.
+type Decompressor interface {
+ // Reader returns a ReadCloser to be used for decompressing file payload.
+ Reader(r io.Reader) (io.ReadCloser, error)
+
+ // FooterSize returns the size of the footer of this blob.
+ FooterSize() int64
+
+ // ParseFooter parses the footer and returns the offset and (compressed) size of the TOC.
+ // blobPayloadSize is the (compressed) size of the blob payload (i.e. the size from
+ // the top of the blob until the TOC JSON).
+ //
+ // Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
+ // from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
+ ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
+
+ // ParseTOC parses the TOC from the passed reader. The reader provides the partial
+ // contents of the underlying blob in the range specified by the ParseFooter method.
+ //
+ // This function returns tocDgst that represents the digest of the TOC that will be
+ // used to verify this blob. This must match the value returned from
+ // Compressor.WriteTOCAndFooter that is used when creating this blob.
+ ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
+}
diff --git a/vendor/github.com/containers/buildah/LICENSE b/vendor/github.com/containers/buildah/LICENSE
new file mode 100644
index 00000000000..8dada3edaf5
--- /dev/null
+++ b/vendor/github.com/containers/buildah/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
+ We also recommend that a file or class name and description of purpose
+ be included on the same "printed page" as the copyright notice for
+ easier identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go
new file mode 100644
index 00000000000..568be203cf6
--- /dev/null
+++ b/vendor/github.com/containers/buildah/define/build.go
@@ -0,0 +1,273 @@
+package define
+
+import (
+ "io"
+ "time"
+
+ nettypes "github.com/containers/common/libnetwork/types"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ "github.com/containers/storage/pkg/archive"
+ "golang.org/x/sync/semaphore"
+)
+
+// CommonBuildOptions are resources that can be defined by flags for both buildah from and build
+type CommonBuildOptions struct {
+ // AddHost is the list of hostnames to add to the build container's /etc/hosts.
+ AddHost []string
+ // CgroupParent is the path to cgroups under which the cgroup for the container will be created.
+ CgroupParent string
+ // CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period
+ CPUPeriod uint64
+ // CPUQuota limits the CPU CFS (Completely Fair Scheduler) quota
+ CPUQuota int64
+ // CPUShares (relative weight)
+ CPUShares uint64
+ // CPUSetCPUs in which to allow execution (0-3, 0,1)
+ CPUSetCPUs string
+ // CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+ CPUSetMems string
+ // HTTPProxy determines whether *_proxy env vars from the build host are passed into the container.
+ HTTPProxy bool
+ // IdentityLabel, if set, ensures that the default `io.buildah.version` label is not applied to the build image.
+ IdentityLabel types.OptionalBool
+ // Memory is the upper limit (in bytes) on how much memory running containers can use.
+ Memory int64
+ // DNSSearch is the list of DNS search domains to add to the build container's /etc/resolv.conf
+ DNSSearch []string
+ // DNSServers is the list of DNS servers to add to the build container's /etc/resolv.conf
+ DNSServers []string
+ // DNSOptions is the list of DNS options to add to the build container's /etc/resolv.conf
+ DNSOptions []string
+ // LabelOpts is a slice of fields of an SELinux context, given in "field:pair" format, or "disable".
+ // Recognized field names are "role", "type", and "level".
+ LabelOpts []string
+ // MemorySwap limits the amount of memory and swap together.
+ MemorySwap int64
+ // NoHosts tells the builder not to create /etc/hosts content when running
+ // containers.
+ NoHosts bool
+ // OmitTimestamp forces epoch 0 as the created timestamp to allow for
+ // deterministic, content-addressable builds.
+ OmitTimestamp bool
+ // SeccompProfilePath is the pathname of a seccomp profile.
+ SeccompProfilePath string
+ // ApparmorProfile is the name of an apparmor profile.
+ ApparmorProfile string
+ // ShmSize is the "size" value to use when mounting an shmfs on the container's /dev/shm directory.
+ ShmSize string
+ // Ulimit specifies resource limit options, in the form type:softlimit[:hardlimit].
+ // These types are recognized:
+ // "core": maximum core dump size (ulimit -c)
+ // "cpu": maximum CPU time (ulimit -t)
+ // "data": maximum size of a process's data segment (ulimit -d)
+ // "fsize": maximum size of new files (ulimit -f)
+ // "locks": maximum number of file locks (ulimit -x)
+ // "memlock": maximum amount of locked memory (ulimit -l)
+ // "msgqueue": maximum amount of data in message queues (ulimit -q)
+ // "nice": niceness adjustment (nice -n, ulimit -e)
+ // "nofile": maximum number of open files (ulimit -n)
+ // "nproc": maximum number of processes (ulimit -u)
+ // "rss": maximum size of a process's resident set (ulimit -m)
+ // "rtprio": maximum real-time scheduling priority (ulimit -r)
+ // "rttime": maximum amount of real-time execution between blocking syscalls
+ // "sigpending": maximum number of pending signals (ulimit -i)
+ // "stack": maximum stack size (ulimit -s)
+ Ulimit []string
+ // Volumes to bind mount into the container
+ Volumes []string
+ // Secrets are the available secrets to use in a build. Each item in the
+ // slice takes the form "id=foo,src=bar", where both "id" and "src" are
+ // required, in that order, and "bar" is the name of a file.
+ Secrets []string
+ // SSHSources are the available ssh agent connections to forward in the build
+ SSHSources []string
+}
+
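Several of these fields carry small string grammars (hostname:IP, type:softlimit[:hardlimit], id=...,src=...). A runnable sketch of populating them; all concrete values are illustrative, not defaults:

```go
package main

import (
	"fmt"

	"github.com/containers/buildah/define"
)

func main() {
	opts := define.CommonBuildOptions{
		AddHost: []string{"builder.local:10.0.0.5"},         // hostname:IP entry for /etc/hosts
		Ulimit:  []string{"nofile:4096:8192"},               // type:softlimit[:hardlimit]
		Secrets: []string{"id=npmrc,src=/home/user/.npmrc"}, // both "id" and "src" are required
		ShmSize: "64m",                                      // size of the /dev/shm mount
	}
	fmt.Println(opts.Ulimit)
}
```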
+// BuildOptions can be used to alter how an image is built.
+type BuildOptions struct {
+ // ContainerSuffix is the name to suffix containers with
+ ContainerSuffix string
+ // ContextDirectory is the default source location for COPY and ADD
+ // commands.
+ ContextDirectory string
+ // PullPolicy controls whether or not we pull images. It should be one
+ // of PullIfMissing, PullAlways, PullIfNewer, or PullNever.
+ PullPolicy PullPolicy
+ // Registry is a value which is prepended to the image's name, if it
+ // needs to be pulled and the image name alone cannot be resolved to a
+ // reference to a source image. No separator is implicitly added.
+ Registry string
+ // IgnoreUnrecognizedInstructions tells us to just log instructions we
+ // don't recognize, and try to keep going.
+ IgnoreUnrecognizedInstructions bool
+ // Manifest is the name to which the image will be added.
+ Manifest string
+ // Quiet tells us whether or not to announce steps as we go through them.
+ Quiet bool
+ // Isolation controls how Run() runs things.
+ Isolation Isolation
+ // Runtime is the name of the command to run for RUN instructions when
+ // Isolation is either IsolationDefault or IsolationOCI. It should
+ // accept the same arguments and flags that runc does.
+ Runtime string
+ // RuntimeArgs adds global arguments for the runtime.
+ RuntimeArgs []string
+ // TransientMounts is a list of mounts that won't be kept in the image.
+ TransientMounts []string
+ // Compression specifies the type of compression which is applied to
+ // layer blobs. The default is to not use compression, but
+ // archive.Gzip is recommended.
+ Compression archive.Compression
+ // Arguments which can be interpolated into Dockerfiles
+ Args map[string]string
+ // Name of the image to write to.
+ Output string
+ // BuildOutput specifies if any custom build output is selected for the following build.
+ // It allows the end user to export the recently built rootfs into a directory or tar.
+ // See the documentation of 'buildah build --output' for the details of the format.
+ BuildOutput string
+ // Additional tags to add to the image that we write, if we know of a
+ // way to add them.
+ AdditionalTags []string
+ // Log is a callback that will print a progress message. If no value
+ // is supplied, the message will be sent to Err (or os.Stderr, if Err
+ // is nil) by default.
+ Log func(format string, args ...interface{})
+ // In is connected to stdin for RUN instructions.
+ In io.Reader
+ // Out is a place where non-error log messages are sent.
+ Out io.Writer
+ // Err is a place where error log messages should be sent.
+ Err io.Writer
+ // SignaturePolicyPath specifies an override location for the signature
+ // policy which should be used for verifying the new image as it is
+ // being written. Except in specific circumstances, no value should be
+ // specified, indicating that the shared, system-wide default policy
+ // should be used.
+ SignaturePolicyPath string
+ // ReportWriter is an io.Writer which will be used to report the
+ // progress of the (possible) pulling of the source image and the
+ // writing of the new image.
+ ReportWriter io.Writer
+ // OutputFormat is the format of the output image's manifest and
+ // configuration data.
+ // Accepted values are buildah.OCIv1ImageManifest and buildah.Dockerv2ImageManifest.
+ OutputFormat string
+ // SystemContext holds parameters used for authentication.
+ SystemContext *types.SystemContext
+ // NamespaceOptions controls how we set up namespaces for processes that we
+ // might need when handling RUN instructions.
+ NamespaceOptions []NamespaceOption
+ // ConfigureNetwork controls whether or not network interfaces and
+ // routing are configured for a new network namespace (i.e., when not
+ // joining another's namespace and not just using the host's
+ // namespace), effectively deciding whether or not the process has a
+ // usable network.
+ ConfigureNetwork NetworkConfigurationPolicy
+ // CNIPluginPath is the location of CNI plugin helpers, if they should be
+ // run from a location other than the default location.
+ CNIPluginPath string
+ // CNIConfigDir is the location of CNI configuration files, if the files in
+ // the default configuration directory shouldn't be used.
+ CNIConfigDir string
+
+ // NetworkInterface is the libnetwork network interface used to set up CNI or netavark networks.
+ NetworkInterface nettypes.ContainerNetwork `json:"-"`
+
+ // ID mapping options to use if we're setting up our own user namespace
+ // when handling RUN instructions.
+ IDMappingOptions *IDMappingOptions
+ // AddCapabilities is a list of capabilities to add to the default set when
+ // handling RUN instructions.
+ AddCapabilities []string
+ // DropCapabilities is a list of capabilities to remove from the default set
+ // when handling RUN instructions. If a capability appears in both lists, it
+ // will be dropped.
+ DropCapabilities []string
+ // CommonBuildOpts is *required*.
+ CommonBuildOpts *CommonBuildOptions
+ // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
+ DefaultMountsFilePath string
+ // IIDFile tells the builder to write the image ID to the specified file
+ IIDFile string
+ // Squash tells the builder to produce an image with a single layer
+ // instead of with possibly more than one layer.
+ Squash bool
+ // Labels metadata for an image
+ Labels []string
+ // Annotations metadata for an image
+ Annotations []string
+ // OnBuild commands to be run by images based on this image
+ OnBuild []string
+ // Layers tells the builder to create a cache of images for each step in the Dockerfile
+ Layers bool
+ // NoCache tells the builder to build the image from scratch without checking for a cache.
+ // It creates a new set of cached images for the build.
+ NoCache bool
+ // RemoveIntermediateCtrs tells the builder whether to remove intermediate containers used
+ // during the build process. Default is true.
+ RemoveIntermediateCtrs bool
+ // ForceRmIntermediateCtrs tells the builder to remove all intermediate containers even if
+ // the build was unsuccessful.
+ ForceRmIntermediateCtrs bool
+ // BlobDirectory is a directory which we'll use for caching layer blobs.
+ BlobDirectory string
+ // Target is the targeted FROM in the Dockerfile to build.
+ Target string
+ // Devices are the additional devices to add to the containers.
+ Devices []string
+ // SignBy is the fingerprint of a GPG key to use for signing images.
+ SignBy string
+ // Architecture specifies the target architecture of the image to be built.
+ Architecture string
+ // Timestamp sets the created timestamp to the specified time, allowing
+ // for deterministic, content-addressable builds.
+ Timestamp *time.Time
+ // OS specifies the operating system of the image to be built.
+ OS string
+ // MaxPullPushRetries is the maximum number of attempts we'll make to pull or push any one
+ // image from or to an external registry if the first attempt fails.
+ MaxPullPushRetries int
+ // PullPushRetryDelay is how long to wait before retrying a pull or push attempt.
+ PullPushRetryDelay time.Duration
+ // OciDecryptConfig, if non-nil, contains the config that can be used to decrypt an image
+ // if it is encrypted. If nil, it does not attempt to decrypt an image.
+ OciDecryptConfig *encconfig.DecryptConfig
+ // Jobs is the number of stages to run in parallel. If not specified it defaults to 1.
+ // Ignored if a JobSemaphore is provided.
+ Jobs *int
+ // JobSemaphore, for when you want Jobs to be shared with more than just this build.
+ JobSemaphore *semaphore.Weighted
+ // LogRusage logs resource usage for each step.
+ LogRusage bool
+ // File to which the Rusage logs will be saved instead of stdout
+ RusageLogFile string
+ // Excludes is a list of excludes to be used instead of the .dockerignore file.
+ Excludes []string
+ // IgnoreFile is the name of the .containerignore file
+ IgnoreFile string
+ // From is the image name to use to replace the value specified in the first
+ // FROM instruction in the Containerfile
+ From string
+ // Platforms is the list of parsed OS/Arch/Variant triples that we want
+ // to build the image for. If this slice has items in it, the OS and
+ // Architecture fields above are ignored.
+ Platforms []struct{ OS, Arch, Variant string }
+ // AllPlatforms tells the builder to set the list of target platforms
+ // to match the set of platforms for which all of the build's base
+ // images are available. If this field is set, Platforms is ignored.
+ AllPlatforms bool
+ // UnsetEnvs is a list of environment variables not to add to the final image.
+ UnsetEnvs []string
+ // Envs is a list of environment variables to set in the final image.
+ Envs []string
+ // OSFeatures specifies operating system features the image requires.
+ // It is typically only set when the OS is "windows".
+ OSFeatures []string
+ // OSVersion specifies the exact operating system version the image
+ // requires. It is typically only set when the OS is "windows". Any
+ // value set in a base image will be preserved, so this does not
+ // frequently need to be set.
+ OSVersion string
+}
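Only a handful of these fields are needed for a simple build. A hedged, runnable sketch of a minimal configuration; the image and path names are illustrative, and PullIfMissing is defined in pull.go further down:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/buildah/define"
)

func main() {
	options := define.BuildOptions{
		ContextDirectory: "/src/app",           // where COPY/ADD paths resolve from
		PullPolicy:       define.PullIfMissing, // see pull.go below
		Output:           "localhost/app:dev",  // name of the image to write
		Layers:           true,                 // cache an image per Dockerfile step
		Out:              os.Stdout,
		Err:              os.Stderr,
		CommonBuildOpts:  &define.CommonBuildOptions{}, // *required*, per its field doc
	}
	fmt.Println(options.Output)
}
```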
diff --git a/vendor/github.com/containers/buildah/define/isolation.go b/vendor/github.com/containers/buildah/define/isolation.go
new file mode 100644
index 00000000000..53bea85fdb0
--- /dev/null
+++ b/vendor/github.com/containers/buildah/define/isolation.go
@@ -0,0 +1,32 @@
+package define
+
+import (
+ "fmt"
+)
+
+type Isolation int
+
+const (
+ // IsolationDefault is whatever we think will work best.
+ IsolationDefault Isolation = iota
+ // IsolationOCI is a proper OCI runtime.
+ IsolationOCI
+ // IsolationChroot is a more chroot-like environment: less isolation,
+ // but with fewer requirements.
+ IsolationChroot
+ // IsolationOCIRootless is a proper OCI runtime in rootless mode.
+ IsolationOCIRootless
+)
+
+// String converts an Isolation into a string.
+func (i Isolation) String() string {
+ switch i {
+ case IsolationDefault, IsolationOCI:
+ return "oci"
+ case IsolationChroot:
+ return "chroot"
+ case IsolationOCIRootless:
+ return "rootless"
+ }
+ return fmt.Sprintf("unrecognized isolation type %d", i)
+}
diff --git a/vendor/github.com/containers/buildah/define/namespace.go b/vendor/github.com/containers/buildah/define/namespace.go
new file mode 100644
index 00000000000..d0247fe9165
--- /dev/null
+++ b/vendor/github.com/containers/buildah/define/namespace.go
@@ -0,0 +1,87 @@
+package define
+
+import (
+ "fmt"
+)
+
+// NamespaceOption controls how we set up a namespace when launching processes.
+type NamespaceOption struct {
+ // Name specifies the type of namespace, typically matching one of the
+ // ...Namespace constants defined in
+ // github.com/opencontainers/runtime-spec/specs-go.
+ Name string
+ // Host is used to force our processes to use the host's namespace of
+ // this type.
+ Host bool
+ // Path is the path of the namespace to attach our process to, if Host
+ // is not set. If Host is not set and Path is also empty, a new
+ // namespace will be created for the process that we're starting.
+ // If Name is specs.NetworkNamespace and Path doesn't look like an
+ // absolute path, it is treated as a comma-separated list of CNI
+ // configuration names which will be selected from among all of the CNI
+ // network configurations which we find.
+ Path string
+}
+
+// NamespaceOptions provides some helper methods for a slice of NamespaceOption
+// structs.
+type NamespaceOptions []NamespaceOption
+
+// Find the configuration for the namespace of the given type. If there are
+// duplicates, find the _last_ one of the type, since we assume it was appended
+// more recently.
+func (n *NamespaceOptions) Find(namespace string) *NamespaceOption {
+ for i := range *n {
+ j := len(*n) - 1 - i
+ if (*n)[j].Name == namespace {
+ return &((*n)[j])
+ }
+ }
+ return nil
+}
+
+// AddOrReplace either adds or replaces the configuration for a given namespace.
+func (n *NamespaceOptions) AddOrReplace(options ...NamespaceOption) {
+nextOption:
+ for _, option := range options {
+ for i := range *n {
+ j := len(*n) - 1 - i
+ if (*n)[j].Name == option.Name {
+ (*n)[j] = option
+ continue nextOption
+ }
+ }
+ *n = append(*n, option)
+ }
+}
+
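Find and AddOrReplace together give last-wins, replace-in-place semantics: an existing entry with the same Name is overwritten rather than duplicated. A runnable sketch (the "mynet" path value is illustrative):

```go
package main

import (
	"fmt"

	"github.com/containers/buildah/define"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	opts := define.NamespaceOptions{
		{Name: string(specs.NetworkNamespace), Host: true},
	}
	// Same Name already present, so AddOrReplace overwrites in place
	// instead of appending a second entry.
	opts.AddOrReplace(define.NamespaceOption{Name: string(specs.NetworkNamespace), Path: "mynet"})
	fmt.Println(opts.Find(string(specs.NetworkNamespace)).Path, len(opts)) // mynet 1
}
```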
+// NetworkConfigurationPolicy takes the value NetworkDefault, NetworkDisabled,
+// or NetworkEnabled.
+type NetworkConfigurationPolicy int
+
+const (
+ // NetworkDefault is one of the values that BuilderOptions.ConfigureNetwork
+ // can take, signalling that the default behavior should be used.
+ NetworkDefault NetworkConfigurationPolicy = iota
+ // NetworkDisabled is one of the values that BuilderOptions.ConfigureNetwork
+ // can take, signalling that network interfaces should NOT be configured for
+ // newly-created network namespaces.
+ NetworkDisabled
+ // NetworkEnabled is one of the values that BuilderOptions.ConfigureNetwork
+ // can take, signalling that network interfaces should be configured for
+ // newly-created network namespaces.
+ NetworkEnabled
+)
+
+// String formats a NetworkConfigurationPolicy as a string.
+func (p NetworkConfigurationPolicy) String() string {
+ switch p {
+ case NetworkDefault:
+ return "NetworkDefault"
+ case NetworkDisabled:
+ return "NetworkDisabled"
+ case NetworkEnabled:
+ return "NetworkEnabled"
+ }
+ return fmt.Sprintf("unknown NetworkConfigurationPolicy %d", p)
+}
diff --git a/vendor/github.com/containers/buildah/define/pull.go b/vendor/github.com/containers/buildah/define/pull.go
new file mode 100644
index 00000000000..00787bd9b64
--- /dev/null
+++ b/vendor/github.com/containers/buildah/define/pull.go
@@ -0,0 +1,50 @@
+package define
+
+import (
+ "fmt"
+)
+
+// PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever.
+type PullPolicy int
+
+const (
+ // PullIfMissing is one of the values that BuilderOptions.PullPolicy
+ // can take, signalling that the source image should be pulled from a
+ // registry if a local copy of it is not already present.
+ PullIfMissing PullPolicy = iota
+ // PullAlways is one of the values that BuilderOptions.PullPolicy can
+ // take, signalling that a fresh, possibly updated, copy of the image
+ // should be pulled from a registry before the build proceeds.
+ PullAlways
+ // PullIfNewer is one of the values that BuilderOptions.PullPolicy
+ // can take, signalling that the source image should only be pulled
+ // from a registry if a local copy is not already present or if a
+ // newer version of the image is present in the repository.
+ PullIfNewer
+ // PullNever is one of the values that BuilderOptions.PullPolicy can
+ // take, signalling that the source image should not be pulled from a
+ // registry.
+ PullNever
+)
+
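String and PolicyMap (both defined just below) are inverses of each other, so parsing a user-supplied policy string reduces to a map lookup. A runnable sketch; the flag value and the fallback to PullIfMissing are illustrative choices:

```go
package main

import (
	"fmt"

	"github.com/containers/buildah/define"
)

// parsePullPolicy maps a user-supplied string (e.g. from a --pull
// flag) onto a PullPolicy via PolicyMap, falling back to
// PullIfMissing for unrecognized input.
func parsePullPolicy(value string) define.PullPolicy {
	if p, ok := define.PolicyMap[value]; ok {
		return p
	}
	return define.PullIfMissing
}

func main() {
	fmt.Println(parsePullPolicy("ifnewer")) // ifnewer
	fmt.Println(parsePullPolicy("bogus"))   // missing
}
```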
+// String converts a PullPolicy into a string.
+func (p PullPolicy) String() string {
+ switch p {
+ case PullIfMissing:
+ return "missing"
+ case PullAlways:
+ return "always"
+ case PullIfNewer:
+ return "ifnewer"
+ case PullNever:
+ return "never"
+ }
+ return fmt.Sprintf("unrecognized policy %d", p)
+}
+
+// PolicyMap maps the string forms of the pull policies back to their
+// PullPolicy values.
+var PolicyMap = map[string]PullPolicy{
+ "missing": PullIfMissing,
+ "always": PullAlways,
+ "never": PullNever,
+ "ifnewer": PullIfNewer,
+}
diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go
new file mode 100644
index 00000000000..459a161cdd6
--- /dev/null
+++ b/vendor/github.com/containers/buildah/define/types.go
@@ -0,0 +1,231 @@
+package define
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ urlpkg "net/url"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/storage/pkg/archive"
+ "github.com/containers/storage/pkg/chrootarchive"
+ "github.com/containers/storage/pkg/ioutils"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // Package is the name of this package, used in help output and to
+ // identify working containers.
+ Package = "buildah"
+ // Version for the Package. Bump version in contrib/rpm/buildah.spec
+ // too.
+ Version = "1.26.1"
+
+ // DefaultRuntime is the runtime to use if containers.conf fails to specify one.
+ DefaultRuntime = "runc"
+
+ // OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,
+ // suitable for specifying as a value of the PreferredManifestType
+ // member of a CommitOptions structure. It is also the default.
+ OCIv1ImageManifest = v1.MediaTypeImageManifest
+ // Dockerv2ImageManifest is the MIME type of a Docker v2s2 image
+ // manifest, suitable for specifying as a value of the
+ // PreferredManifestType member of a CommitOptions structure.
+ Dockerv2ImageManifest = manifest.DockerV2Schema2MediaType
+
+ // OCI is used to define the "oci" image format.
+ OCI = "oci"
+ // DOCKER is used to define the "docker" image format.
+ DOCKER = "docker"
+)
+
+var (
+ // DefaultCapabilities is the list of capabilities which we grant by
+ // default to containers which are running under UID 0.
+ DefaultCapabilities = []string{
+ "CAP_AUDIT_WRITE",
+ "CAP_CHOWN",
+ "CAP_DAC_OVERRIDE",
+ "CAP_FOWNER",
+ "CAP_FSETID",
+ "CAP_KILL",
+ "CAP_MKNOD",
+ "CAP_NET_BIND_SERVICE",
+ "CAP_SETFCAP",
+ "CAP_SETGID",
+ "CAP_SETPCAP",
+ "CAP_SETUID",
+ "CAP_SYS_CHROOT",
+ }
+ // DefaultNetworkSysctl is the list of Kernel parameters which we
+ // grant by default to containers which are running under UID 0.
+ DefaultNetworkSysctl = map[string]string{
+ "net.ipv4.ping_group_range": "0 0",
+ }
+
+ Gzip = archive.Gzip
+ Bzip2 = archive.Bzip2
+ Xz = archive.Xz
+ Zstd = archive.Zstd
+ Uncompressed = archive.Uncompressed
+)
+
+// IDMappingOptions controls how we set up UID/GID mapping when we set up a
+// user namespace.
+type IDMappingOptions struct {
+ HostUIDMapping bool
+ HostGIDMapping bool
+ UIDMap []specs.LinuxIDMapping
+ GIDMap []specs.LinuxIDMapping
+}
+
+// Secret is a secret source that can be used in a RUN instruction.
+type Secret struct {
+ ID string
+ Source string
+ SourceType string
+}
+
+// BuildOutputOption contains the outcome of parsing the value of a build --output flag.
+type BuildOutputOption struct {
+ Path string // Only valid if !IsStdout
+ IsDir bool
+ IsStdout bool
+}
+
+// TempDirForURL checks if the passed-in string looks like a URL or "-". If it is,
+// TempDirForURL creates a temporary directory, arranges for its contents to be
+// the contents of that URL, and returns the temporary directory's path, along
+// with the name of a subdirectory which should be used as the build context
+// (which may be empty or "."). Removal of the temporary directory is the
+// responsibility of the caller. If the string doesn't look like a URL,
+// TempDirForURL returns empty strings and a nil error.
+func TempDirForURL(dir, prefix, url string) (name string, subdir string, err error) {
+ if !strings.HasPrefix(url, "http://") &&
+ !strings.HasPrefix(url, "https://") &&
+ !strings.HasPrefix(url, "git://") &&
+ !strings.HasPrefix(url, "github.com/") &&
+ url != "-" {
+ return "", "", nil
+ }
+ name, err = ioutil.TempDir(dir, prefix)
+ if err != nil {
+ return "", "", errors.Wrapf(err, "error creating temporary directory for %q", url)
+ }
+ urlParsed, err := urlpkg.Parse(url)
+ if err != nil {
+ return "", "", errors.Wrapf(err, "error parsing url %q", url)
+ }
+ if strings.HasPrefix(url, "git://") || strings.HasSuffix(urlParsed.Path, ".git") {
+ combinedOutput, err := cloneToDirectory(url, name)
+ if err != nil {
+ if err2 := os.RemoveAll(name); err2 != nil {
+ logrus.Debugf("error removing temporary directory %q: %v", name, err2)
+ }
+ return "", "", errors.Wrapf(err, "cloning %q to %q:\n%s", url, name, string(combinedOutput))
+ }
+ return name, "", nil
+ }
+ if strings.HasPrefix(url, "github.com/") {
+ ghurl := url
+ url = fmt.Sprintf("https://%s/archive/master.tar.gz", ghurl)
+ logrus.Debugf("resolving url %q to %q", ghurl, url)
+ subdir = path.Base(ghurl) + "-master"
+ }
+ if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") {
+ err = downloadToDirectory(url, name)
+ if err != nil {
+ if err2 := os.RemoveAll(name); err2 != nil {
+ logrus.Debugf("error removing temporary directory %q: %v", name, err2)
+ }
+ return "", subdir, err
+ }
+ return name, subdir, nil
+ }
+ if url == "-" {
+ err = stdinToDirectory(name)
+ if err != nil {
+ if err2 := os.RemoveAll(name); err2 != nil {
+ logrus.Debugf("error removing temporary directory %q: %v", name, err2)
+ }
+ return "", subdir, err
+ }
+ logrus.Debugf("Build context is at %q", name)
+ return name, subdir, nil
+ }
+ logrus.Debugf("don't know how to retrieve %q", url)
+ if err2 := os.Remove(name); err2 != nil {
+ logrus.Debugf("error removing temporary directory %q: %v", name, err2)
+ }
+ return "", "", errors.Errorf("unreachable code reached")
+}
+
+func cloneToDirectory(url, dir string) ([]byte, error) {
+ gitBranch := strings.Split(url, "#")
+ var cmd *exec.Cmd
+ if len(gitBranch) < 2 {
+ logrus.Debugf("cloning %q to %q", url, dir)
+ cmd = exec.Command("git", "clone", url, dir)
+ } else {
+ logrus.Debugf("cloning repo %q and branch %q to %q", gitBranch[0], gitBranch[1], dir)
+ cmd = exec.Command("git", "clone", "--recurse-submodules", "-b", gitBranch[1], gitBranch[0], dir)
+ }
+ return cmd.CombinedOutput()
+}
+
+func downloadToDirectory(url, dir string) error {
+ logrus.Debugf("extracting %q to %q", url, dir)
+ resp, err := http.Get(url)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.ContentLength == 0 {
+ return errors.Errorf("no contents in %q", url)
+ }
+ if err := chrootarchive.Untar(resp.Body, dir, nil); err != nil {
+ resp1, err := http.Get(url)
+ if err != nil {
+ return err
+ }
+ defer resp1.Body.Close()
+ body, err := ioutil.ReadAll(resp1.Body)
+ if err != nil {
+ return err
+ }
+ dockerfile := filepath.Join(dir, "Dockerfile")
+ //
Assume this is a Dockerfile + if err := ioutils.AtomicWriteFile(dockerfile, body, 0600); err != nil { + return errors.Wrapf(err, "Failed to write %q to %q", url, dockerfile) + } + } + return nil +} + +func stdinToDirectory(dir string) error { + logrus.Debugf("extracting stdin to %q", dir) + r := bufio.NewReader(os.Stdin) + b, err := ioutil.ReadAll(r) + if err != nil { + return errors.Wrapf(err, "Failed to read from stdin") + } + reader := bytes.NewReader(b) + if err := chrootarchive.Untar(reader, dir, nil); err != nil { + dockerfile := filepath.Join(dir, "Dockerfile") + // Assume this is a Dockerfile + if err := ioutils.AtomicWriteFile(dockerfile, b, 0600); err != nil { + return errors.Wrapf(err, "Failed to write bytes to %q", dockerfile) + } + } + return nil +} diff --git a/vendor/github.com/containers/buildah/define/types_unix.go b/vendor/github.com/containers/buildah/define/types_unix.go new file mode 100644 index 00000000000..aedadad368f --- /dev/null +++ b/vendor/github.com/containers/buildah/define/types_unix.go @@ -0,0 +1,9 @@ +// +build darwin linux + +package define + +import ( + "github.com/opencontainers/runc/libcontainer/devices" +) + +type ContainerDevices = []devices.Device diff --git a/vendor/github.com/containers/buildah/define/types_unsupported.go b/vendor/github.com/containers/buildah/define/types_unsupported.go new file mode 100644 index 00000000000..64e26d377e8 --- /dev/null +++ b/vendor/github.com/containers/buildah/define/types_unsupported.go @@ -0,0 +1,6 @@ +// +build !linux,!darwin + +package define + +// ContainerDevices is currently not implemented. +type ContainerDevices = []struct{} diff --git a/vendor/github.com/containers/common/LICENSE b/vendor/github.com/containers/common/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/github.com/containers/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go
new file mode 100644
index 00000000000..01cedc7ed49
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/copier.go
@@ -0,0 +1,439 @@
+package libimage
+
+import (
+ "context"
+ "encoding/json"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/containers/common/libimage/manifests"
+ "github.com/containers/common/pkg/config"
+ "github.com/containers/common/pkg/retry"
+ "github.com/containers/image/v5/copy"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/pkg/compression"
+ "github.com/containers/image/v5/signature"
+ storageTransport "github.com/containers/image/v5/storage"
+ "github.com/containers/image/v5/types"
+ encconfig "github.com/containers/ocicrypt/config"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ defaultMaxRetries = 3
+ defaultRetryDelay = time.Second
+)
+
+// LookupReferenceFunc returns an image reference based on the specified one.
+// The returned reference can return custom ImageSource or ImageDestination
+// objects which intercept or filter blobs, manifests, and signatures as
+// they are read and written.
+type LookupReferenceFunc = manifests.LookupReferenceFunc
+
+// CopyOptions allow for customizing image-copy operations.
+type CopyOptions struct {
+ // If set, will be used for copying the image. Fields below may
+ // override certain settings.
+ SystemContext *types.SystemContext
+ // Allows for customizing the source reference lookup. This can be
+ // used to use custom blob caches.
+ SourceLookupReferenceFunc LookupReferenceFunc
+ // Allows for customizing the destination reference lookup. This can
+ // be used to use custom blob caches.
+ DestinationLookupReferenceFunc LookupReferenceFunc
+ // CompressionFormat is the format to use for the compression of the blobs
+ CompressionFormat *compression.Algorithm
+ // CompressionLevel specifies what compression level is used
+ CompressionLevel *int
+
+ // containers-auth.json(5) file to use when authenticating against
+ // container registries.
+ AuthFilePath string
+ // Custom path to a blob-info cache.
+ BlobInfoCacheDirPath string
+ // Path to the certificates directory.
+ CertDirPath string
+ // Force layer compression when copying to a `dir` transport destination.
+ DirForceCompress bool
+ // Allow contacting registries over HTTP, or HTTPS with failed TLS
+ // verification. Note that this does not affect other TLS connections.
+ InsecureSkipTLSVerify types.OptionalBool
+ // Maximum number of retries with exponential backoff when facing
+ // transient network errors. A reasonable default is used if not set.
+ // Default 3.
+ MaxRetries *uint
+ // RetryDelay is used for the exponential back off of MaxRetries.
+ // Default 1 time.Second.
+ RetryDelay *time.Duration
+ // ManifestMIMEType is the desired media type the image will be
+ // converted to if needed. Note that it must contain the exact MIME
+ // types. Short forms (e.g., oci, v2s2) used by some tools are not
+ // supported.
+ ManifestMIMEType string
+ // Accept uncompressed layers when copying OCI images.
+ OciAcceptUncompressedLayers bool
+ // If OciEncryptConfig is non-nil, it indicates that an image should be
+ // encrypted. The encryption options are derived from the construction
+ // of the EncryptConfig object.
+ // Note: During the initial encryption process of a layer, the resultant
+ // digest is not known during creation, so newDigestingReader has to be
+ // set with validateDigest = false.
+ OciEncryptConfig *encconfig.EncryptConfig
+ // OciEncryptLayers represents the list of layers to encrypt. If nil,
+ // don't encrypt any layers. If non-nil and len==0, denotes encrypt
+ // all layers. Integers in the slice represent 0-indexed layer
+ // indices, with support for negative indexing, i.e. 0 is the first
+ // layer, -1 is the last (top-most) layer.
+ OciEncryptLayers *[]int
+ // OciDecryptConfig, if non-nil, contains the config that can be used to
+ // decrypt an image if it is encrypted. If nil, it does not attempt to
+ // decrypt an image.
+ OciDecryptConfig *encconfig.DecryptConfig
+ // Progress is reported to this channel when ProgressInterval has arrived for a single
+ // artifact+offset.
+ Progress chan types.ProgressProperties
+ // If set, allow using the storage transport even if it's disabled by
+ // the specified SignaturePolicyPath.
+ PolicyAllowStorage bool
+ // SignaturePolicyPath to overwrite the default one.
+ SignaturePolicyPath string
+ // If non-empty, asks for a signature to be added during the copy, and
+ // specifies a key ID.
+ SignBy string
+ // Remove any pre-existing signatures. SignBy will still add a new
+ // signature.
+ RemoveSignatures bool
+ // Writer is used to display copy information including progress bars.
+ Writer io.Writer
+
+ // ----- platform -----------------------------------------------------
+
+ // Architecture to use for choosing images.
+ Architecture string
+ // OS to use for choosing images.
+ OS string
+ // Variant to use when choosing images.
+ Variant string
+
+ // ----- credentials --------------------------------------------------
+
+ // Username to use when authenticating at a container registry.
+ Username string
+ // Password to use when authenticating at a container registry.
+ Password string
+ // Credentials is an alternative way to specify credentials in the format
+ // "username[:password]". Cannot be used in combination with
+ // Username/Password.
+ Credentials string
+ // IdentityToken is used to authenticate the user and get
+ // an access token for the registry.
+ IdentityToken string `json:"identitytoken,omitempty"`
+
+ // ----- internal -----------------------------------------------------
+
+ // Additional tags when creating or copying a docker-archive.
+ dockerArchiveAdditionalTags []reference.NamedTagged
+}
+
+// copier is an internal helper to conveniently copy images.
+type copier struct {
+ imageCopyOptions copy.Options
+ retryOptions retry.RetryOptions
+ systemContext *types.SystemContext
+ policyContext *signature.PolicyContext
+
+ sourceLookup LookupReferenceFunc
+ destinationLookup LookupReferenceFunc
+}
+
+// storageAllowedPolicyScopes overrides the policy for local storage
+// to ensure that we can read images from it.
+var storageAllowedPolicyScopes = signature.PolicyTransportScopes{
+ "": []signature.PolicyRequirement{
+ signature.NewPRInsecureAcceptAnything(),
+ },
+}
+
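Credentials and Username/Password are two mutually exclusive ways of supplying the same information, as getDockerAuthConfig below enforces. A runnable sketch with illustrative values:

```go
package main

import (
	"fmt"

	"github.com/containers/common/libimage"
)

func main() {
	// Combined form: split on the first colon by getDockerAuthConfig.
	viaCombined := &libimage.CopyOptions{Credentials: "alice:s3cr3t"}
	// Split form; setting Username together with Credentials is an error.
	viaSplit := &libimage.CopyOptions{Username: "alice", Password: "s3cr3t"}
	fmt.Println(viaCombined.Credentials, viaSplit.Username)
}
```

+// getDockerAuthConfig extracts a docker auth config from the CopyOptions. Returns
+// nil if no credentials are set.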
+func (options *CopyOptions) getDockerAuthConfig() (*types.DockerAuthConfig, error) { + authConf := &types.DockerAuthConfig{IdentityToken: options.IdentityToken} + + if options.Username != "" { + if options.Credentials != "" { + return nil, errors.New("username/password cannot be used with credentials") + } + authConf.Username = options.Username + authConf.Password = options.Password + return authConf, nil + } + + if options.Credentials != "" { + split := strings.SplitN(options.Credentials, ":", 2) + switch len(split) { + case 1: + authConf.Username = split[0] + default: + authConf.Username = split[0] + authConf.Password = split[1] + } + return authConf, nil + } + + // We should return nil unless a token was set. That's especially + // useful for Podman's remote API. + if options.IdentityToken != "" { + return authConf, nil + } + + return nil, nil +} + +// newCopier creates a copier. Note that fields in options *may* overwrite the +// counterparts of the specified system context. Please make sure to call +// `(*copier).close()`. +func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { + c := copier{} + c.systemContext = r.systemContextCopy() + + if options.SourceLookupReferenceFunc != nil { + c.sourceLookup = options.SourceLookupReferenceFunc + } + + if options.DestinationLookupReferenceFunc != nil { + c.destinationLookup = options.DestinationLookupReferenceFunc + } + + if options.InsecureSkipTLSVerify != types.OptionalBoolUndefined { + c.systemContext.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify + c.systemContext.OCIInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue + c.systemContext.DockerDaemonInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue + } + + c.systemContext.DirForceCompress = c.systemContext.DirForceCompress || options.DirForceCompress + + if options.AuthFilePath != "" { + c.systemContext.AuthFilePath = options.AuthFilePath + } + + c.systemContext.DockerArchiveAdditionalTags = options.dockerArchiveAdditionalTags + + c.systemContext.OSChoice, c.systemContext.ArchitectureChoice, c.systemContext.VariantChoice = NormalizePlatform(options.OS, options.Architecture, options.Variant) + + if options.SignaturePolicyPath != "" { + c.systemContext.SignaturePolicyPath = options.SignaturePolicyPath + } + + dockerAuthConfig, err := options.getDockerAuthConfig() + if err != nil { + return nil, err + } + if dockerAuthConfig != nil { + c.systemContext.DockerAuthConfig = dockerAuthConfig + } + + if options.BlobInfoCacheDirPath != "" { + c.systemContext.BlobInfoCacheDir = options.BlobInfoCacheDirPath + } + + if options.CertDirPath != "" { + c.systemContext.DockerCertPath = options.CertDirPath + } + + if options.CompressionFormat != nil { + c.systemContext.CompressionFormat = options.CompressionFormat + } + + if options.CompressionLevel != nil { + c.systemContext.CompressionLevel = options.CompressionLevel + } + + // NOTE: for the sake of consistency it's called Oci* in the CopyOptions. + c.systemContext.OCIAcceptUncompressedLayers = options.OciAcceptUncompressedLayers + + policy, err := signature.DefaultPolicy(c.systemContext) + if err != nil { + return nil, err + } + + // Buildah compatibility: even if the policy denies _all_ transports, + // Buildah still wants the storage to be accessible. 
+ if options.PolicyAllowStorage { + policy.Transports[storageTransport.Transport.Name()] = storageAllowedPolicyScopes + } + + policyContext, err := signature.NewPolicyContext(policy) + if err != nil { + return nil, err + } + + c.policyContext = policyContext + + c.retryOptions.MaxRetry = defaultMaxRetries + if options.MaxRetries != nil { + c.retryOptions.MaxRetry = int(*options.MaxRetries) + } + c.retryOptions.Delay = defaultRetryDelay + if options.RetryDelay != nil { + c.retryOptions.Delay = *options.RetryDelay + } + + c.imageCopyOptions.Progress = options.Progress + if c.imageCopyOptions.Progress != nil { + c.imageCopyOptions.ProgressInterval = time.Second + } + + c.imageCopyOptions.ForceManifestMIMEType = options.ManifestMIMEType + c.imageCopyOptions.SourceCtx = c.systemContext + c.imageCopyOptions.DestinationCtx = c.systemContext + c.imageCopyOptions.OciEncryptConfig = options.OciEncryptConfig + c.imageCopyOptions.OciEncryptLayers = options.OciEncryptLayers + c.imageCopyOptions.OciDecryptConfig = options.OciDecryptConfig + c.imageCopyOptions.RemoveSignatures = options.RemoveSignatures + c.imageCopyOptions.SignBy = options.SignBy + c.imageCopyOptions.ReportWriter = options.Writer + + defaultContainerConfig, err := config.Default() + if err != nil { + logrus.Warnf("Failed to get container config for copy options: %v", err) + } else { + c.imageCopyOptions.MaxParallelDownloads = defaultContainerConfig.Engine.ImageParallelCopies + } + + return &c, nil +} + +// close open resources. +func (c *copier) close() error { + return c.policyContext.Destroy() +} + +// copy the source to the destination. Returns the bytes of the copied +// manifest which may be used for digest computation. +func (c *copier) copy(ctx context.Context, source, destination types.ImageReference) ([]byte, error) { + logrus.Debugf("Copying source image %s to destination image %s", source.StringWithinTransport(), destination.StringWithinTransport()) + + var err error + + if c.sourceLookup != nil { + source, err = c.sourceLookup(source) + if err != nil { + return nil, err + } + } + + if c.destinationLookup != nil { + destination, err = c.destinationLookup(destination) + if err != nil { + return nil, err + } + } + + // Buildah compat: used when running in OpenShift. + sourceInsecure, err := checkRegistrySourcesAllows(source) + if err != nil { + return nil, err + } + destinationInsecure, err := checkRegistrySourcesAllows(destination) + if err != nil { + return nil, err + } + + // Sanity checks for Buildah. 
+ if sourceInsecure != nil && *sourceInsecure { + if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse { + return nil, errors.Errorf("can't require TLS verification on an insecure registry") + } + } + if destinationInsecure != nil && *destinationInsecure { + if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse { + return nil, errors.Errorf("can't require TLS verification on an insecure registry") + } + } + + var returnManifest []byte + f := func() error { + opts := c.imageCopyOptions + if sourceInsecure != nil { + value := types.NewOptionalBool(*sourceInsecure) + opts.SourceCtx.DockerInsecureSkipTLSVerify = value + } + if destinationInsecure != nil { + value := types.NewOptionalBool(*destinationInsecure) + opts.DestinationCtx.DockerInsecureSkipTLSVerify = value + } + + copiedManifest, err := copy.Image(ctx, c.policyContext, destination, source, &opts) + if err == nil { + returnManifest = copiedManifest + } + return err + } + return returnManifest, retry.RetryIfNecessary(ctx, f, &c.retryOptions) +} + +// checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment +// variable, if it's set. The contents are expected to be a JSON-encoded +// github.com/openshift/api/config/v1.Image, set by an OpenShift build +// controller that arranged for us to be run in a container. +// +// If set, the insecure return value indicates whether the registry is set to +// be insecure. +// +// NOTE: this functionality is required by Buildah for OpenShift. +func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err error) { + registrySources, ok := os.LookupEnv("BUILD_REGISTRY_SOURCES") + if !ok || registrySources == "" { + return nil, nil + } + + logrus.Debugf("BUILD_REGISTRY_SOURCES set %q", registrySources) + + dref := dest.DockerReference() + if dref == nil || reference.Domain(dref) == "" { + return nil, nil + } + + // Use local struct instead of github.com/openshift/api/config/v1 RegistrySources + var sources struct { + InsecureRegistries []string `json:"insecureRegistries,omitempty"` + BlockedRegistries []string `json:"blockedRegistries,omitempty"` + AllowedRegistries []string `json:"allowedRegistries,omitempty"` + } + if err := json.Unmarshal([]byte(registrySources), &sources); err != nil { + return nil, errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources) + } + blocked := false + if len(sources.BlockedRegistries) > 0 { + for _, blockedDomain := range sources.BlockedRegistries { + if blockedDomain == reference.Domain(dref) { + blocked = true + } + } + } + if blocked { + return nil, errors.Errorf("registry %q denied by policy: it is in the blocked registries list (%s)", reference.Domain(dref), registrySources) + } + allowed := true + if len(sources.AllowedRegistries) > 0 { + allowed = false + for _, allowedDomain := range sources.AllowedRegistries { + if allowedDomain == reference.Domain(dref) { + allowed = true + } + } + } + if !allowed { + return nil, errors.Errorf("registry %q denied by policy: not in allowed registries list (%s)", reference.Domain(dref), registrySources) + } + + for _, insecureDomain := range sources.InsecureRegistries { + if insecureDomain == reference.Domain(dref) { + insecure := true + return &insecure, nil + } + } + + return nil, nil +} diff --git a/vendor/github.com/containers/common/libimage/disk_usage.go b/vendor/github.com/containers/common/libimage/disk_usage.go new file mode 100644 index 00000000000..2cde098468f --- /dev/null +++ b/vendor/github.com/containers/common/libimage/disk_usage.go @@ -0,0 +1,130 @@ +package libimage + +import ( + "context" + "time" +) + +// ImageDiskUsage reports the total size of an image, that is the size of its +// layers, split into the portion shared with other images and the portion +// unique to it. +type ImageDiskUsage struct { + // Number of containers using the image. + Containers int + // ID of the image. + ID string + // Repository of the image. + Repository string + // Tag of the image. + Tag string + // Created time stamp. + Created time.Time + // The amount of space that an image shares with another one (i.e. their common data). + SharedSize int64 + // The amount of space that is only used by a given image. + UniqueSize int64 + // Sum of shared and unique size. + Size int64 +} + +// DiskUsage calculates the disk usage for each image in the local containers +// storage. Note that a single image may yield multiple usage reports, one for +// each repository tag. +func (r *Runtime) DiskUsage(ctx context.Context) ([]ImageDiskUsage, error) { + layerTree, err := r.layerTree() + if err != nil { + return nil, err + } + + images, err := r.ListImages(ctx, nil, nil) + if err != nil { + return nil, err + } + + var allUsages []ImageDiskUsage + for _, image := range images { + usages, err := diskUsageForImage(ctx, image, layerTree) + if err != nil { + return nil, err + } + allUsages = append(allUsages, usages...) + } + return allUsages, nil +} + +// diskUsageForImage returns the disk-usage statistics for the specified image. +func diskUsageForImage(ctx context.Context, image *Image, tree *layerTree) ([]ImageDiskUsage, error) { + if err := image.isCorrupted(""); err != nil { + return nil, err + } + + base := ImageDiskUsage{ + ID: image.ID(), + Created: image.Created(), + Repository: "", + Tag: "", + } + + // Shared, unique and total size. + parent, err := tree.parent(ctx, image) + if err != nil { + return nil, err + } + childIDs, err := tree.children(ctx, image, false) + if err != nil { + return nil, err + } + + // Optimistically set unique size to the full size of the image. + size, err := image.Size() + if err != nil { + return nil, err + } + base.UniqueSize = size + + if len(childIDs) > 0 { + // If we have children, we share everything. + base.SharedSize = base.UniqueSize + base.UniqueSize = 0 + } else if parent != nil { + // If we have no children but a parent, remove the parent + // (shared) size from the unique one. + size, err := parent.Size() + if err != nil { + return nil, err + } + base.UniqueSize -= size + base.SharedSize = size + } + + base.Size = base.SharedSize + base.UniqueSize + + // Number of containers using the image. + containers, err := image.Containers() + if err != nil { + return nil, err + } + base.Containers = len(containers) + + repoTags, err := image.NamedRepoTags() + if err != nil { + return nil, err + } + + if len(repoTags) == 0 { + return []ImageDiskUsage{base}, nil + } + + pairs, err := ToNameTagPairs(repoTags) + if err != nil { + return nil, err + } + + results := make([]ImageDiskUsage, len(pairs)) + for i, pair := range pairs { + res := base + res.Repository = pair.Name + res.Tag = pair.Tag + results[i] = res + } + + return results, nil +} diff --git a/vendor/github.com/containers/common/libimage/events.go b/vendor/github.com/containers/common/libimage/events.go new file mode 100644 index 00000000000..c7733564db5 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/events.go @@ -0,0 +1,62 @@ +package libimage + +import ( + "time" + + "github.com/sirupsen/logrus" +) + +// EventType indicates the type of an event. Currently, there is only one +// supported type for container images, but we may add more (e.g., for manifest +// lists) in the future. +type EventType int + +const ( + // EventTypeUnknown is an uninitialized EventType. + EventTypeUnknown EventType = iota + // EventTypeImagePull represents an image pull. + EventTypeImagePull + // EventTypeImagePush represents an image push. + EventTypeImagePush + // EventTypeImageRemove represents an image removal. + EventTypeImageRemove + // EventTypeImageLoad represents an image being loaded. + EventTypeImageLoad + // EventTypeImageSave represents an image being saved. + EventTypeImageSave + // EventTypeImageTag represents an image being tagged. + EventTypeImageTag + // EventTypeImageUntag represents an image being untagged. + EventTypeImageUntag + // EventTypeImageMount represents an image being mounted. + EventTypeImageMount + // EventTypeImageUnmount represents an image being unmounted. + EventTypeImageUnmount +) + +// Event represents an event such as an image pull or an image tag. +type Event struct { + // ID of the object (e.g., image ID). + ID string + // Name of the object (e.g., image name "quay.io/containers/podman:latest") + Name string + // Time of the event. + Time time.Time + // Type of the event. + Type EventType +} + +// writeEvent writes the specified event to the Runtime's event channel. The +// event is discarded if no event channel has been registered (yet). +func (r *Runtime) writeEvent(event *Event) { + select { + case r.eventChannel <- event: + // Done + case <-time.After(2 * time.Second): + // The Runtime's event channel has a buffer of size 100 which + // should be enough even under high load. However, we + // shouldn't block too long in case the buffer runs full (could + // be an honest user error or bug). + logrus.Warnf("Discarding libimage event which was not read within 2 seconds: %v", event) + } +} diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go new file mode 100644 index 00000000000..f9f73f527ef --- /dev/null +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -0,0 +1,398 @@ +package libimage + +import ( + "context" + "fmt" + "path" + "strconv" + "strings" + "time" + + filtersPkg "github.com/containers/common/pkg/filters" + "github.com/containers/common/pkg/timetype" + "github.com/containers/image/v5/docker/reference" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// filterFunc is a prototype for a positive image filter. Returning `true` +// indicates that the image matches the criteria. +type filterFunc func(*Image) (bool, error) + +// Apply the specified filters. For each key, at least one of its filters must +// match. +func (i *Image) applyFilters(filters map[string][]filterFunc) (bool, error) { + matches := false + for key := range filters { // and + matches = false + for _, filter := range filters[key] { // or + var err error + matches, err = filter(i) + if err != nil { + // Some images may have been corrupted in the + // meantime, so do an extra check and make the + // error non-fatal (see containers/podman/issues/12582). + if errCorrupted := i.isCorrupted(""); errCorrupted != nil { + logrus.Error(errCorrupted.Error()) + return false, nil + } + return false, err + } + if matches { + break + } + } + if !matches { + return false, nil + } + } + return matches, nil +}
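For readers skimming the vendored filter logic above: filters are ANDed across keys and ORed within a key. The following standalone sketch mirrors those semantics on plain predicates; the `predicate`/`apply` names are illustrative and not part of the vendored code.

package main

import "fmt"

// predicate mirrors libimage's filterFunc, minus the error return.
type predicate func(int) bool

// apply mirrors (*Image).applyFilters: every key must match (AND),
// and a key matches if any one of its predicates does (OR).
func apply(v int, filters map[string][]predicate) bool {
	for key := range filters {
		matched := false
		for _, f := range filters[key] {
			if f(v) {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}

func main() {
	filters := map[string][]predicate{
		"positive": {func(v int) bool { return v > 0 }},
		"parity":   {func(v int) bool { return v%2 == 0 }, func(v int) bool { return v%3 == 0 }},
	}
	fmt.Println(apply(6, filters)) // true: positive AND (even OR divisible by 3)
	fmt.Println(apply(5, filters)) // false: fails every filter under the "parity" key
}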
+ +// filterImages returns a slice of images that pass all specified filters. +func (r *Runtime) filterImages(ctx context.Context, images []*Image, options *ListImagesOptions) ([]*Image, error) { + if len(options.Filters) == 0 || len(images) == 0 { + return images, nil + } + + filters, err := r.compileImageFilters(ctx, options) + if err != nil { + return nil, err + } + result := []*Image{} + for i := range images { + match, err := images[i].applyFilters(filters) + if err != nil { + return nil, err + } + if match { + result = append(result, images[i]) + } + } + return result, nil +} + +// compileImageFilters creates `filterFunc`s for the specified filters. The +// required format is `key=value` with the following supported keys: +// after, since, before, containers, dangling, id, intermediate, label, +// manifest, readonly, reference, until +func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOptions) (map[string][]filterFunc, error) { + logrus.Tracef("Parsing image filters %s", options.Filters) + + var tree *layerTree + getTree := func() (*layerTree, error) { + if tree == nil { + t, err := r.layerTree() + if err != nil { + return nil, err + } + tree = t + } + return tree, nil + } + + filters := map[string][]filterFunc{} + duplicate := map[string]string{} + for _, f := range options.Filters { + var key, value string + var filter filterFunc + negate := false + split := strings.SplitN(f, "!=", 2) + if len(split) == 2 { + negate = true + } else { + split = strings.SplitN(f, "=", 2) + if len(split) != 2 { + return nil, errors.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value or filter!=value") + } + } + + key = split[0] + value = split[1] + switch key { + + case "after", "since": + img, err := r.time(key, value) + if err != nil { + return nil, err + } + key = "since" + filter = filterAfter(img.Created()) + + case "before": + img, err := r.time(key, value) + if err != nil { + return nil, err + } + filter = filterBefore(img.Created()) + + case "containers": + if err := r.containers(duplicate, key, value, options.IsExternalContainerFunc); err != nil { + return nil, err + } + filter = filterContainers(value, options.IsExternalContainerFunc) + + case "dangling": + dangling, err := r.bool(duplicate, key, value) + if err != nil { + return nil, err + } + t, err := getTree() + if err != nil { + return nil, err + } + + filter = filterDangling(ctx, dangling, t) + + case "id": + filter = filterID(value) + + case "intermediate": + intermediate, err := r.bool(duplicate, key, value) + if err != nil { + return nil, err + } + t, err := getTree() + if err != nil { + return nil, err + } + + filter = filterIntermediate(ctx, intermediate, t) + + case "label": + filter = filterLabel(ctx, value) + + case "readonly": + readOnly, err := r.bool(duplicate, key, value) + if err != nil { + return nil, err + } + filter = filterReadOnly(readOnly) + + case "manifest": + manifest, err := r.bool(duplicate, key, value) + if err != nil { + return nil, err + } + filter = filterManifest(ctx, manifest) + + case "reference": + filter = filterReferences(value) + + case "until": + until, err := r.until(value) + if err != nil { + return nil, err + } + filter = filterBefore(until) + + default: + return nil, errors.Errorf("unsupported image filter %q", key) + } + if negate { + filter = negateFilter(filter) + } + filters[key] = append(filters[key], filter) + } + + return filters, nil +} + +func negateFilter(f filterFunc) filterFunc { + return func(img *Image) (bool, error) { + b, err := f(img) + return !b, err + } +} + +func (r *Runtime) containers(duplicate map[string]string, key, value string, externalFunc IsExternalContainerFunc) error { + if exists, ok := duplicate[key]; ok && exists != value { + return errors.Errorf("specifying %q filter more than once with different values is not supported", key) + } + duplicate[key] = value + switch value { + case "false", "true": + case "external": + if externalFunc == nil { + return fmt.Errorf("libimage error: external containers filter without callback") + } + default: + return fmt.Errorf("unsupported value %q for containers filter", value) + } + return nil +} + +func (r *Runtime) until(value string) (time.Time, error) { + var until time.Time + ts, err := timetype.GetTimestamp(value, time.Now()) + if err != nil { + return until, err + } + seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0) + if err != nil { + return until, err + } + return time.Unix(seconds, nanoseconds), nil +} + +func (r *Runtime) time(key, value string) (*Image, error) { + img, _, err := r.LookupImage(value, nil) + if err != nil { + return nil, errors.Wrapf(err, "could not find local image for filter %q=%q", key, value) + } + return img, nil +} + +func (r *Runtime) bool(duplicate map[string]string, key, value string) (bool, error) { + if exists, ok := duplicate[key]; ok && exists != value { + return false, errors.Errorf("specifying %q filter more than once with different values is not supported", key) + } + duplicate[key] = value + set, err := strconv.ParseBool(value) + if err != nil { + return false, errors.Wrapf(err, "non-boolean value %q for %s filter", value, key) + } + return set, nil +} + +// filterManifest filters whether or not the image is a manifest list +func filterManifest(ctx context.Context, value bool) filterFunc { + return func(img *Image) (bool, error) { + isManifestList, err := img.IsManifestList(ctx) + if err != nil { + return false, err + } + return isManifestList == value, nil + } +} + +// filterReferences creates a reference filter for matching the specified value. +func filterReferences(value string) filterFunc { + return func(img *Image) (bool, error) { + refs, err := img.NamesReferences() + if err != nil { + return false, err + } + + for _, ref := range refs { + refString := ref.String() // FQN with tag/digest + candidates := []string{refString} + + // Split the reference into 3 components (twice if digested/tagged): + // 1) Fully-qualified reference + // 2) Without domain + // 3) Without domain and path + if named, isNamed := ref.(reference.Named); isNamed { + candidates = append(candidates, + reference.Path(named), // path/name without tag/digest (Path() removes it) + refString[strings.LastIndex(refString, "/")+1:]) // name with tag/digest + + trimmedString := reference.TrimNamed(named).String() + if refString != trimmedString { + tagOrDigest := refString[len(trimmedString):] + candidates = append(candidates, + trimmedString, // FQN without tag/digest + reference.Path(named)+tagOrDigest, // path/name with tag/digest + trimmedString[strings.LastIndex(trimmedString, "/")+1:]) // name without tag/digest + } + } + + for _, candidate := range candidates { + // path.Match() is also used by Docker's reference.FamiliarMatch(). + matched, _ := path.Match(value, candidate) + if matched { + return true, nil + } + } + } + return false, nil + } +} + +// filterLabel creates a label filter for matching the specified value.
+func filterLabel(ctx context.Context, value string) filterFunc { + return func(img *Image) (bool, error) { + labels, err := img.Labels(ctx) + if err != nil { + return false, err + } + return filtersPkg.MatchLabelFilters([]string{value}, labels), nil + } +} + +// filterAfter creates an after filter for matching the specified value. +func filterAfter(value time.Time) filterFunc { + return func(img *Image) (bool, error) { + return img.Created().After(value), nil + } +} + +// filterBefore creates a before filter for matching the specified value. +func filterBefore(value time.Time) filterFunc { + return func(img *Image) (bool, error) { + return img.Created().Before(value), nil + } +} + +// filterReadOnly creates a readonly filter for matching the specified value. +func filterReadOnly(value bool) filterFunc { + return func(img *Image) (bool, error) { + return img.IsReadOnly() == value, nil + } +} + +// filterContainers creates a container filter for matching the specified value. +func filterContainers(value string, fn IsExternalContainerFunc) filterFunc { + return func(img *Image) (bool, error) { + ctrs, err := img.Containers() + if err != nil { + return false, err + } + if value != "external" { + boolValue := value == "true" + return (len(ctrs) > 0) == boolValue, nil + } + + // Check whether all associated containers are external ones. + for _, c := range ctrs { + isExternal, err := fn(c) + if err != nil { + return false, fmt.Errorf("checking if %s is an external container in filter: %w", c, err) + } + if !isExternal { + return isExternal, nil + } + } + return true, nil + } +} + +// filterDangling creates a dangling filter for matching the specified value. +func filterDangling(ctx context.Context, value bool, tree *layerTree) filterFunc { + return func(img *Image) (bool, error) { + isDangling, err := img.isDangling(ctx, tree) + if err != nil { + return false, err + } + return isDangling == value, nil + } +} + +// filterID creates an image-ID filter for matching the specified value. +func filterID(value string) filterFunc { + return func(img *Image) (bool, error) { + return img.ID() == value, nil + } +} + +// filterIntermediate creates an intermediate filter for images. An image is +// considered to be an intermediate image if it is dangling (i.e., no tags) and +// has no children (i.e., no other image depends on it). +func filterIntermediate(ctx context.Context, value bool, tree *layerTree) filterFunc { + return func(img *Image) (bool, error) { + isIntermediate, err := img.isIntermediate(ctx, tree) + if err != nil { + return false, err + } + return isIntermediate == value, nil + } +} diff --git a/vendor/github.com/containers/common/libimage/history.go b/vendor/github.com/containers/common/libimage/history.go new file mode 100644 index 00000000000..b63fe696bc5 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/history.go @@ -0,0 +1,80 @@ +package libimage + +import ( + "context" + "time" + + "github.com/containers/storage" +) + +// ImageHistory contains the history information of an image. +type ImageHistory struct { + ID string `json:"id"` + Created *time.Time `json:"created"` + CreatedBy string `json:"createdBy"` + Size int64 `json:"size"` + Comment string `json:"comment"` + Tags []string `json:"tags"` +} + +// History computes the image history of the image including all of its parents. 
+func (i *Image) History(ctx context.Context) ([]ImageHistory, error) { + ociImage, err := i.toOCI(ctx) + if err != nil { + return nil, err + } + + layerTree, err := i.runtime.layerTree() + if err != nil { + return nil, err + } + + var allHistory []ImageHistory + var layer *storage.Layer + if i.TopLayer() != "" { + layer, err = i.runtime.store.Layer(i.TopLayer()) + if err != nil { + return nil, err + } + } + + // Iterate in reverse order over the history entries, look up the + // corresponding image ID and size, and get the next layer if needed. + numHistories := len(ociImage.History) - 1 + usedIDs := make(map[string]bool) // prevents assigning image IDs more than once + for x := numHistories; x >= 0; x-- { + history := ImageHistory{ + ID: "", // may be overridden below + Created: ociImage.History[x].Created, + CreatedBy: ociImage.History[x].CreatedBy, + Comment: ociImage.History[x].Comment, + } + + if layer != nil { + history.Tags = layer.Names + if !ociImage.History[x].EmptyLayer { + history.Size = layer.UncompressedSize + } + // Query the layer tree if it's the top layer of an + // image. + node := layerTree.node(layer.ID) + if len(node.images) > 0 { + id := node.images[0].ID() // always use the first one + if _, used := usedIDs[id]; !used { + history.ID = id + usedIDs[id] = true + } + } + if layer.Parent != "" && !ociImage.History[x].EmptyLayer { + layer, err = i.runtime.store.Layer(layer.Parent) + if err != nil { + return nil, err + } + } + } + + allHistory = append(allHistory, history) + } + + return allHistory, nil +} diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go new file mode 100644 index 00000000000..661ca159b48 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/image.go @@ -0,0 +1,949 @@ +package libimage + +import ( + "context" + "fmt" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + storageTransport "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/hashicorp/go-multierror" + "github.com/opencontainers/go-digest" + ociv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Image represents an image in the containers storage and allows for further +// operations and data manipulation. +type Image struct { + // Backwards pointer to the runtime. + runtime *Runtime + + // Counterpart in the local containers storage. + storageImage *storage.Image + + // Image reference to the containers storage. + storageReference types.ImageReference + + // All fields in the below structure are cached. They may be cleared + // at any time. When adding a new field, please make sure to clear + // it in `(*Image).reload()`. + cached struct { + // Image source. Cached for performance reasons. + imageSource types.ImageSource + // Inspect data we get from containers/image. + partialInspectData *types.ImageInspectInfo + // Fully assembled image data. + completeInspectData *ImageData + // Corresponding OCI image. + ociv1Image *ociv1.Image + // Names() parsed into references. + namesReferences []reference.Reference + } +}
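To show how the assembled history above is consumed, here is a hedged sketch; the `imagetools` package and `printHistory` helper are illustrative names, and obtaining the *libimage.Image from a Runtime is outside the scope of this diff.

package imagetools

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
)

// printHistory prints one line per history entry in the order that
// (*Image).History returns them (top-most layer first).
func printHistory(ctx context.Context, img *libimage.Image) error {
	entries, err := img.History(ctx)
	if err != nil {
		return err
	}
	for _, e := range entries {
		created := ""
		if e.Created != nil {
			created = e.Created.Format("2006-01-02")
		}
		fmt.Printf("%-14s %10d %-12s %s\n", e.ID, e.Size, created, e.CreatedBy)
	}
	return nil
}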
+ +// reload the image and pessimistically clear all cached data. +func (i *Image) reload() error { + logrus.Tracef("Reloading image %s", i.ID()) + img, err := i.runtime.store.Image(i.ID()) + if err != nil { + return errors.Wrap(err, "reloading image") + } + i.storageImage = img + i.cached.imageSource = nil + i.cached.partialInspectData = nil + i.cached.completeInspectData = nil + i.cached.ociv1Image = nil + i.cached.namesReferences = nil + return nil +} + +// isCorrupted returns an error if the image may be corrupted. +func (i *Image) isCorrupted(name string) error { + // If it's a manifest list, we're good for now. + if _, err := i.getManifestList(); err == nil { + return nil + } + + ref, err := i.StorageReference() + if err != nil { + return err + } + + if _, err := ref.NewImage(context.Background(), nil); err != nil { + if name == "" { + name = i.ID()[:12] + } + return errors.Errorf("Image %s exists in local storage but may be corrupted (remove the image to resolve the issue): %v", name, err) + } + return nil +} + +// Names returns the names associated with the image, which may be a mix of +// tags and digests. +func (i *Image) Names() []string { + return i.storageImage.Names +} + +// NamesReferences returns Names() as references. +func (i *Image) NamesReferences() ([]reference.Reference, error) { + if i.cached.namesReferences != nil { + return i.cached.namesReferences, nil + } + refs := make([]reference.Reference, 0, len(i.Names())) + for _, name := range i.Names() { + ref, err := reference.Parse(name) + if err != nil { + return nil, err + } + refs = append(refs, ref) + } + i.cached.namesReferences = refs + return refs, nil +} + +// StorageImage returns the underlying storage.Image. +func (i *Image) StorageImage() *storage.Image { + return i.storageImage +} + +// NamesHistory returns a string array of names previously associated with the +// image, which may be a mixture of tags and digests. +func (i *Image) NamesHistory() []string { + return i.storageImage.NamesHistory +} + +// ID returns the ID of the image. +func (i *Image) ID() string { + return i.storageImage.ID +} + +// Digest is a digest value that we can use to locate the image, if one was +// specified at creation-time. Typically it is the digest of one among +// possibly many digests that we have stored for the image, so many +// applications are better off using the entire list returned by Digests(). +func (i *Image) Digest() digest.Digest { + return i.storageImage.Digest +} + +// Digests is a list of digest values of the image's manifests, and possibly a +// manually-specified value, that we can use to locate the image. If Digest is +// set, its value is also in this list. +func (i *Image) Digests() []digest.Digest { + return i.storageImage.Digests +} + +// IsReadOnly returns whether the image is set read only. +func (i *Image) IsReadOnly() bool { + return i.storageImage.ReadOnly +} + +// IsDangling returns true if the image is dangling, that is an untagged image +// without children. +func (i *Image) IsDangling(ctx context.Context) (bool, error) { + return i.isDangling(ctx, nil) +}
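The dangling/intermediate distinction (IsIntermediate is defined just below) is subtle: both kinds of image are untagged, and the presence of children is what separates them. A small illustrative helper, assuming an image obtained from a Runtime; the `imagetools`/`classify` names are hypothetical.

package imagetools

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
)

// classify reports whether an untagged image is dangling (no children)
// or intermediate (has children), mirroring the definitions in this file.
func classify(ctx context.Context, img *libimage.Image) (string, error) {
	dangling, err := img.IsDangling(ctx)
	if err != nil {
		return "", err
	}
	if dangling {
		return "dangling", nil
	}
	intermediate, err := img.IsIntermediate(ctx)
	if err != nil {
		return "", err
	}
	if intermediate {
		return "intermediate", nil
	}
	return fmt.Sprintf("tagged as %v", img.Names()), nil
}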
+ +// isDangling returns true if the image is dangling, that is an untagged image +// without children. If tree is nil, it will be created for this invocation only. +func (i *Image) isDangling(ctx context.Context, tree *layerTree) (bool, error) { + if len(i.Names()) > 0 { + return false, nil + } + children, err := i.getChildren(ctx, false, tree) + if err != nil { + return false, err + } + return len(children) == 0, nil +} + +// IsIntermediate returns true if the image is an intermediate image, that is +// an untagged image with children. +func (i *Image) IsIntermediate(ctx context.Context) (bool, error) { + return i.isIntermediate(ctx, nil) +} + +// isIntermediate returns true if the image is an intermediate image, that is +// an untagged image with children. If tree is nil, it will be created for this +// invocation only. +func (i *Image) isIntermediate(ctx context.Context, tree *layerTree) (bool, error) { + if len(i.Names()) > 0 { + return false, nil + } + children, err := i.getChildren(ctx, false, tree) + if err != nil { + return false, err + } + return len(children) != 0, nil +} + +// Created returns the time the image was created. +func (i *Image) Created() time.Time { + return i.storageImage.Created +} + +// Labels returns the labels of the image. +func (i *Image) Labels(ctx context.Context) (map[string]string, error) { + data, err := i.inspectInfo(ctx) + if err != nil { + isManifestList, listErr := i.IsManifestList(ctx) + if listErr != nil { + err = errors.Wrapf(err, "fallback error checking whether image is a manifest list: %v", listErr) + } else if isManifestList { + logrus.Debugf("Ignoring error: cannot return labels for manifest list or image index %s", i.ID()) + return nil, nil + } + return nil, err + } + + return data.Labels, nil +} + +// TopLayer returns the top layer id as a string +func (i *Image) TopLayer() string { + return i.storageImage.TopLayer +} + +// Parent returns the parent image or nil if there is none. +func (i *Image) Parent(ctx context.Context) (*Image, error) { + tree, err := i.runtime.layerTree() + if err != nil { + return nil, err + } + return tree.parent(ctx, i) +} + +// HasChildren indicates whether the image has children. +func (i *Image) HasChildren(ctx context.Context) (bool, error) { + children, err := i.getChildren(ctx, false, nil) + if err != nil { + return false, err + } + return len(children) > 0, nil +} + +// Children returns the image's children. +func (i *Image) Children(ctx context.Context) ([]*Image, error) { + children, err := i.getChildren(ctx, true, nil) + if err != nil { + return nil, err + } + return children, nil +} + +// getChildren returns the child images that depend on the image. If all is +// false, only the first child image is returned. If tree is nil, it will be +// created for this invocation only. +func (i *Image) getChildren(ctx context.Context, all bool, tree *layerTree) ([]*Image, error) { + if tree == nil { + t, err := i.runtime.layerTree() + if err != nil { + return nil, err + } + tree = t + } + return tree.children(ctx, i, all) +} + +// Containers returns a list of containers using the image. +func (i *Image) Containers() ([]string, error) { + var containerIDs []string + containers, err := i.runtime.store.Containers() + if err != nil { + return nil, err + } + imageID := i.ID() + for i := range containers { + if containers[i].ImageID == imageID { + containerIDs = append(containerIDs, containers[i].ID) + } + } + return containerIDs, nil +} + +// removeContainers removes all containers using the image. +func (i *Image) removeContainers(options *RemoveImagesOptions) error { + if !options.Force && !options.ExternalContainers { + // Nothing to do.
+ return nil + } + + if options.Force && options.RemoveContainerFunc != nil { + logrus.Debugf("Removing containers of image %s with custom removal function", i.ID()) + if err := options.RemoveContainerFunc(i.ID()); err != nil { + return err + } + } + + containers, err := i.Containers() + if err != nil { + return err + } + + if !options.Force && options.ExternalContainers { + // All containers must be external ones. + for _, cID := range containers { + isExternal, err := options.IsExternalContainerFunc(cID) + if err != nil { + return fmt.Errorf("checking if %s is an external container: %w", cID, err) + } + if !isExternal { + return fmt.Errorf("cannot remove container %s: not an external container", cID) + } + } + } + + logrus.Debugf("Removing containers of image %s from the local containers storage", i.ID()) + var multiE error + for _, cID := range containers { + if err := i.runtime.store.DeleteContainer(cID); err != nil { + // If the container does not exist anymore, we're good. + if errors.Cause(err) != storage.ErrContainerUnknown { + multiE = multierror.Append(multiE, err) + } + } + } + + return multiE +} + +// RemoveContainerFunc allows for customizing the removal of containers using +// an image specified by imageID. +type RemoveContainerFunc func(imageID string) error + +// RemoveImageReport is the assembled data from removing *one* image. +type RemoveImageReport struct { + // ID of the image. + ID string + // Image was removed. + Removed bool + // Size of the removed image. Only set when explicitly requested in + // RemoveImagesOptions. + Size int64 + // The untagged tags. + Untagged []string +} + +// remove removes the image along with all dangling parent images that no other +// image depends on. The image must not be set read-only and must not be used +// by containers. Returns IDs of removed/untagged images in order. +// +// If the image is used by containers, return storage.ErrImageUsedByContainer. +// Use force to remove these containers. +// +// NOTE: the rmMap is used to assemble image-removal data across multiple +// invocations of this function. The recursive nature requires some +// bookkeeping to make sure that all data is aggregated correctly. +// +// This function is internal. Users of libimage should always use +// `(*Runtime).RemoveImages()`. +func (i *Image) remove(ctx context.Context, rmMap map[string]*RemoveImageReport, referencedBy string, options *RemoveImagesOptions) ([]string, error) { + processedIDs := []string{} + return i.removeRecursive(ctx, rmMap, processedIDs, referencedBy, options) +} + +func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveImageReport, processedIDs []string, referencedBy string, options *RemoveImagesOptions) ([]string, error) { + // If referencedBy is empty, the image is considered to be removed via + // `image remove --all` which alters the logic below. + + // The removal logic below is complex. There is a number of rules + // inherited from Podman and Buildah (and Docker). This function + // should be the *only* place to extend the removal logic so we keep it + // sealed in one place. Make sure to add verbose comments to leave + // some breadcrumbs for future readers.
+ logrus.Debugf("Removing image %s", i.ID()) + + if i.IsReadOnly() { + return processedIDs, errors.Errorf("cannot remove read-only image %q", i.ID()) + } + + if i.runtime.eventChannel != nil { + defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: referencedBy, Time: time.Now(), Type: EventTypeImageRemove}) + } + + // Check if we have already visited this image. + report, exists := rmMap[i.ID()] + if exists { + // If the image has already been removed, we're done. + if report.Removed { + return processedIDs, nil + } + } else { + report = &RemoveImageReport{ID: i.ID()} + rmMap[i.ID()] = report + } + + // The image may have already been (partially) removed, so we need to + // have a closer look at the errors. On top of that, image removal + // should be tolerant toward corrupted images. + handleError := func(err error) error { + switch errors.Cause(err) { + case storage.ErrImageUnknown, storage.ErrNotAnImage, storage.ErrLayerUnknown: + // The image or layers of the image may already + // have been removed in which case we consider + // the image to be removed. + return nil + default: + return err + } + } + + // Calculate the size if requested. `podman-image-prune` likes to + // report the regained size. + if options.WithSize { + size, err := i.Size() + if handleError(err) != nil { + return processedIDs, err + } + report.Size = size + } + + skipRemove := false + numNames := len(i.Names()) + + // NOTE: the `numNames == 1` check is not only a performance + // optimization but also preserves existing Podman/Docker behaviour. + // If image "foo" is used by a container and has only this tag/name, + // an `rmi foo` will not untag "foo" but instead attempt to remove the + // entire image. If there's a container using "foo", we should get an + // error. + if referencedBy == "" || numNames == 1 { + // DO NOTHING, the image will be removed + } else { + byID := strings.HasPrefix(i.ID(), referencedBy) + byDigest := strings.HasPrefix(referencedBy, "sha256:") + if !options.Force { + if byID && numNames > 1 { + return processedIDs, errors.Errorf("unable to delete image %q by ID with more than one tag (%s): please force removal", i.ID(), i.Names()) + } else if byDigest && numNames > 1 { + // FIXME - Docker will remove the digest but containers storage + // does not support that yet, so our hands are tied. + return processedIDs, errors.Errorf("unable to delete image %q by digest with more than one tag (%s): please force removal", i.ID(), i.Names()) + } + } + + // Only try to untag if we know it's not an ID or digest. + if !byID && !byDigest { + if err := i.Untag(referencedBy); handleError(err) != nil { + return processedIDs, err + } + report.Untagged = append(report.Untagged, referencedBy) + + // If there are still tags left, we cannot delete the image. + skipRemove = len(i.Names()) > 0 + } + } + + processedIDs = append(processedIDs, i.ID()) + if skipRemove { + return processedIDs, nil + } + + // Perform the container removal, if needed. + if err := i.removeContainers(options); err != nil { + return processedIDs, err + } + + // Podman/Docker compat: we only report an image as removed if it has + // no children. Otherwise, the data is effectively still present in the + // storage despite the image being removed. + hasChildren, err := i.HasChildren(ctx) + if err != nil { + // We must be tolerant toward corrupted images. + // See containers/podman commit fd9dd7065d44.
+ logrus.Warnf("Failed to determine if an image is a parent: %v, ignoring the error", err) + hasChildren = false + } + + // If there's a dangling parent that no other image depends on, remove + // it recursively. + parent, err := i.Parent(ctx) + if err != nil { + // We must be tolerant toward corrupted images. + // See containers/podman commit fd9dd7065d44. + logrus.Warnf("Failed to determine parent of image: %v, ignoring the error", err) + parent = nil + } + + if _, err := i.runtime.store.DeleteImage(i.ID(), true); handleError(err) != nil { + return processedIDs, err + } + report.Untagged = append(report.Untagged, i.Names()...) + + if !hasChildren { + report.Removed = true + } + + // Check if we can remove the parent image. + if parent == nil { + return processedIDs, nil + } + + // Only remove the parent if it's dangling, that is, untagged and + // without children. + danglingParent, err := parent.IsDangling(ctx) + if err != nil { + // See Podman commit fd9dd7065d44: we need to + // be tolerant toward corrupted images. + logrus.Warnf("Failed to determine if an image is a parent: %v, ignoring the error", err) + danglingParent = false + } + if !danglingParent { + return processedIDs, nil + } + + // Recurse into removing the parent. + return parent.removeRecursive(ctx, rmMap, processedIDs, "", options) +} + +var errTagDigest = errors.New("tag by digest not supported") + +// Tag the image with the specified name and store it in the local containers +// storage. The name is normalized according to the rules of NormalizeName. +func (i *Image) Tag(name string) error { + if strings.HasPrefix(name, "sha256:") { // ambiguous input + return errors.Wrap(errTagDigest, name) + } + + ref, err := NormalizeName(name) + if err != nil { + return errors.Wrapf(err, "normalizing name %q", name) + } + + if _, isDigested := ref.(reference.Digested); isDigested { + return errors.Wrap(errTagDigest, name) + } + + logrus.Debugf("Tagging image %s with %q", i.ID(), ref.String()) + if i.runtime.eventChannel != nil { + defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: name, Time: time.Now(), Type: EventTypeImageTag}) + } + + newNames := append(i.Names(), ref.String()) + if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil { + return err + } + + return i.reload() +} + +// errTagUnknown exists to have some symmetry with the errors from +// containers/storage. +var errTagUnknown = errors.New("tag not known") + +// TODO (@vrothberg) - `docker rmi sha256:` will remove the digest from the +// image. However, that's something containers storage does not support. +var errUntagDigest = errors.New("untag by digest not supported") + +// Untag the image with the specified name and make the change persistent in +// the local containers storage. The name is normalized according to the rules +// of NormalizeName. +func (i *Image) Untag(name string) error { + if strings.HasPrefix(name, "sha256:") { // ambiguous input + return errors.Wrap(errUntagDigest, name) + } + + ref, err := NormalizeName(name) + if err != nil { + return errors.Wrapf(err, "normalizing name %q", name) + } + + // FIXME: this is breaking Podman CI but must be re-enabled once + // c/storage supports altering the digests of an image. Then, + // Podman will do the right thing. + // + // !!! Also make sure to re-enable the tests !!!
+ // + // if _, isDigested := ref.(reference.Digested); isDigested { + // return errors.Wrap(errUntagDigest, name) + // } + + name = ref.String() + + logrus.Debugf("Untagging %q from image %s", ref.String(), i.ID()) + if i.runtime.eventChannel != nil { + defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: name, Time: time.Now(), Type: EventTypeImageUntag}) + } + + removedName := false + newNames := []string{} + for _, n := range i.Names() { + if n == name { + removedName = true + continue + } + newNames = append(newNames, n) + } + + if !removedName { + return errors.Wrap(errTagUnknown, name) + } + + if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil { + return err + } + + return i.reload() +} + +// RepoTags returns a string slice of repotags associated with the image. +func (i *Image) RepoTags() ([]string, error) { + namedTagged, err := i.NamedTaggedRepoTags() + if err != nil { + return nil, err + } + repoTags := make([]string, len(namedTagged)) + for i := range namedTagged { + repoTags[i] = namedTagged[i].String() + } + return repoTags, nil +} + +// NamedTaggedRepoTags returns the repotags associated with the image as a +// slice of reference.NamedTagged. +func (i *Image) NamedTaggedRepoTags() ([]reference.NamedTagged, error) { + var repoTags []reference.NamedTagged + for _, name := range i.Names() { + parsed, err := reference.Parse(name) + if err != nil { + return nil, err + } + named, isNamed := parsed.(reference.Named) + if !isNamed { + continue + } + tagged, isTagged := named.(reference.NamedTagged) + if !isTagged { + continue + } + repoTags = append(repoTags, tagged) + } + return repoTags, nil +} + +// NamedRepoTags returns the repotags associated with the image as a +// slice of reference.Named. +func (i *Image) NamedRepoTags() ([]reference.Named, error) { + var repoTags []reference.Named + for _, name := range i.Names() { + parsed, err := reference.Parse(name) + if err != nil { + return nil, err + } + if named, isNamed := parsed.(reference.Named); isNamed { + repoTags = append(repoTags, named) + } + } + return repoTags, nil +} + +// inRepoTags looks for the specified name/tag pair in the image's repo tags. +func (i *Image) inRepoTags(namedTagged reference.NamedTagged) (reference.Named, error) { + repoTags, err := i.NamedRepoTags() + if err != nil { + return nil, err + } + + pairs, err := ToNameTagPairs(repoTags) + if err != nil { + return nil, err + } + + name := namedTagged.Name() + tag := namedTagged.Tag() + for _, pair := range pairs { + if tag != pair.Tag { + continue + } + if !strings.HasSuffix(pair.Name, name) { + continue + } + if len(pair.Name) == len(name) { // full match + return pair.named, nil + } + if pair.Name[len(pair.Name)-len(name)-1] == '/' { // matches at repo + return pair.named, nil + } + } + + return nil, nil +} + +// RepoDigests returns a string array of repodigests associated with the image. 
+func (i *Image) RepoDigests() ([]string, error) { + repoDigests := []string{} + added := make(map[string]struct{}) + + for _, name := range i.Names() { + for _, imageDigest := range append(i.Digests(), i.Digest()) { + if imageDigest == "" { + continue + } + + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, err + } + + canonical, err := reference.WithDigest(reference.TrimNamed(named), imageDigest) + if err != nil { + return nil, err + } + + if _, alreadyInList := added[canonical.String()]; !alreadyInList { + repoDigests = append(repoDigests, canonical.String()) + added[canonical.String()] = struct{}{} + } + } + } + sort.Strings(repoDigests) + return repoDigests, nil +} + +// Mount the image with the specified mount options and label, both of which +// are directly passed down to the containers storage. Returns the fully +// evaluated path to the mount point. +func (i *Image) Mount(ctx context.Context, mountOptions []string, mountLabel string) (string, error) { + if i.runtime.eventChannel != nil { + defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: "", Time: time.Now(), Type: EventTypeImageMount}) + } + + mountPoint, err := i.runtime.store.MountImage(i.ID(), mountOptions, mountLabel) + if err != nil { + return "", err + } + mountPoint, err = filepath.EvalSymlinks(mountPoint) + if err != nil { + return "", err + } + logrus.Debugf("Mounted image %s at %q", i.ID(), mountPoint) + return mountPoint, nil +} + +// Mountpoint returns the path to image's mount point. The path is empty if +// the image is not mounted. +func (i *Image) Mountpoint() (string, error) { + mountedTimes, err := i.runtime.store.Mounted(i.TopLayer()) + if err != nil || mountedTimes == 0 { + if errors.Cause(err) == storage.ErrLayerUnknown { + // Can happen, Podman did it, but there's no + // explanation why. + err = nil + } + return "", err + } + + layer, err := i.runtime.store.Layer(i.TopLayer()) + if err != nil { + return "", err + } + + mountPoint, err := filepath.EvalSymlinks(layer.MountPoint) + if err != nil { + return "", err + } + + return mountPoint, nil +} + +// Unmount the image. Use force to ignore the reference counter and forcefully +// unmount. +func (i *Image) Unmount(force bool) error { + if i.runtime.eventChannel != nil { + defer i.runtime.writeEvent(&Event{ID: i.ID(), Name: "", Time: time.Now(), Type: EventTypeImageUnmount}) + } + logrus.Debugf("Unmounted image %s", i.ID()) + _, err := i.runtime.store.UnmountImage(i.ID(), force) + return err +} + +// Size computes the size of the image layers and associated data. +func (i *Image) Size() (int64, error) { + return i.runtime.store.ImageSize(i.ID()) +} + +// HasDifferentDigestOptions allows for customizing the check if another +// (remote) image has a different digest. +type HasDifferentDigestOptions struct { + // containers-auth.json(5) file to use when authenticating against + // container registries. + AuthFilePath string +} + +// HasDifferentDigest returns true if the image specified by `remoteRef` has a +// different digest than the local one. This check can be useful to check for +// updates on remote registries. +func (i *Image) HasDifferentDigest(ctx context.Context, remoteRef types.ImageReference, options *HasDifferentDigestOptions) (bool, error) { + // We need to account for the arch that the image uses. It seems + // common on ARM to tweak this option to pull the correct image. See + // github.com/containers/podman/issues/6613. 
+ inspectInfo, err := i.inspectInfo(ctx) + if err != nil { + return false, err + } + + sys := i.runtime.systemContextCopy() + sys.ArchitectureChoice = inspectInfo.Architecture + // OS and variant may not be set, so let's check to avoid accidental + // overrides of the runtime settings. + if inspectInfo.Os != "" { + sys.OSChoice = inspectInfo.Os + } + if inspectInfo.Variant != "" { + sys.VariantChoice = inspectInfo.Variant + } + + if options != nil && options.AuthFilePath != "" { + sys.AuthFilePath = options.AuthFilePath + } + + return i.hasDifferentDigestWithSystemContext(ctx, remoteRef, sys) +} + +func (i *Image) hasDifferentDigestWithSystemContext(ctx context.Context, remoteRef types.ImageReference, sys *types.SystemContext) (bool, error) { + remoteImg, err := remoteRef.NewImage(ctx, sys) + if err != nil { + return false, err + } + + rawManifest, rawManifestMIMEType, err := remoteImg.Manifest(ctx) + if err != nil { + return false, err + } + + // If the remote ref's manifest is a list, try to zero in on the image + // in the list that we would eventually try to pull. + var remoteDigest digest.Digest + if manifest.MIMETypeIsMultiImage(rawManifestMIMEType) { + list, err := manifest.ListFromBlob(rawManifest, rawManifestMIMEType) + if err != nil { + return false, err + } + remoteDigest, err = list.ChooseInstance(sys) + if err != nil { + return false, err + } + } else { + remoteDigest, err = manifest.Digest(rawManifest) + if err != nil { + return false, err + } + } + + // Check if we already have that image's manifest in this image. A + // single image can have multiple manifests that describe the same + // config blob and layers, so treat any match as a successful match. + for _, digest := range append(i.Digests(), i.Digest()) { + if digest.Validate() != nil { + continue + } + if digest.String() == remoteDigest.String() { + return false, nil + } + } + // No matching digest found in the local image. + return true, nil +} + +// driverData gets the driver data for the image's top layer from the store. +func (i *Image) driverData() (*DriverData, error) { + store := i.runtime.store + layerID := i.TopLayer() + driver, err := store.GraphDriver() + if err != nil { + return nil, err + } + metaData, err := driver.Metadata(layerID) + if err != nil { + return nil, err + } + if mountTimes, err := store.Mounted(layerID); mountTimes == 0 || err != nil { + delete(metaData, "MergedDir") + } + return &DriverData{ + Name: driver.String(), + Data: metaData, + }, nil +} + +// StorageReference returns the image's reference to the containers storage +// using the image ID. +func (i *Image) StorageReference() (types.ImageReference, error) { + if i.storageReference != nil { + return i.storageReference, nil + } + ref, err := storageTransport.Transport.ParseStoreReference(i.runtime.store, "@"+i.ID()) + if err != nil { + return nil, err + } + i.storageReference = ref + return ref, nil +} + +// source returns the possibly cached image source. +func (i *Image) source(ctx context.Context) (types.ImageSource, error) { + if i.cached.imageSource != nil { + return i.cached.imageSource, nil + } + ref, err := i.StorageReference() + if err != nil { + return nil, err + } + src, err := ref.NewImageSource(ctx, i.runtime.systemContextCopy()) + if err != nil { + return nil, err + } + i.cached.imageSource = src + return src, nil +} + +// rawConfigBlob returns the image's config as a raw byte slice. Users need to +// unmarshal it to the corresponding type (OCI, Docker v2s{1,2}). +func (i *Image) rawConfigBlob(ctx context.Context) ([]byte, error) { + ref, err := i.StorageReference() + if err != nil { + return nil, err + } + + imageCloser, err := ref.NewImage(ctx, i.runtime.systemContextCopy()) + if err != nil { + return nil, err + } + defer imageCloser.Close() + + return imageCloser.ConfigBlob(ctx) +} + +// Manifest returns the raw data and the MIME type of the image's manifest. +func (i *Image) Manifest(ctx context.Context) (rawManifest []byte, mimeType string, err error) { + src, err := i.source(ctx) + if err != nil { + return nil, "", err + } + return src.GetManifest(ctx, nil) +} + +// getImageID creates an image object and uses the hex value of the config +// blob's digest (if it has one) as the image ID for parsing the store +// reference. +func getImageID(ctx context.Context, src types.ImageReference, sys *types.SystemContext) (string, error) { + newImg, err := src.NewImage(ctx, sys) + if err != nil { + return "", err + } + defer func() { + if err := newImg.Close(); err != nil { + logrus.Errorf("Failed to close image: %q", err) + } + }() + imageDigest := newImg.ConfigInfo().Digest + if err = imageDigest.Validate(); err != nil { + return "", errors.Wrapf(err, "getting config info") + } + return "@" + imageDigest.Encoded(), nil +}
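A hedged sketch of how a caller might combine the exported Manifest accessor above with the same c/image manifest helpers the vendored code uses; `imagetools` and `isManifestListSketch` are illustrative names only.

package imagetools

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
	"github.com/containers/image/v5/manifest"
)

// isManifestListSketch reports whether the image's raw manifest is a
// multi-image index, using manifest.MIMETypeIsMultiImage as the code
// above does when comparing digests.
func isManifestListSketch(ctx context.Context, img *libimage.Image) (bool, error) {
	raw, mimeType, err := img.Manifest(ctx)
	if err != nil {
		return false, err
	}
	fmt.Printf("manifest is %d bytes, MIME type %s\n", len(raw), mimeType)
	return manifest.MIMETypeIsMultiImage(mimeType), nil
}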
diff --git a/vendor/github.com/containers/common/libimage/image_config.go b/vendor/github.com/containers/common/libimage/image_config.go new file mode 100644 index 00000000000..683a2dc9806 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/image_config.go @@ -0,0 +1,239 @@ +package libimage + +import ( + "fmt" + "path/filepath" + "strconv" + "strings" + + "github.com/containers/common/pkg/signal" + ociv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// ImageConfig is a wrapper around the OCIv1 Image Configuration struct exported +// by containers/image, but containing additional fields that are not supported +// by OCIv1 (but are by Docker v2) - notably OnBuild. +type ImageConfig struct { + ociv1.ImageConfig + OnBuild []string +} + +// ImageConfigFromChanges produces a v1.ImageConfig from the --change flag that +// is accepted by several Podman commands. It accepts a (limited) subset of +// Dockerfile instructions. +// Valid changes are: +// * USER +// * EXPOSE +// * ENV +// * ENTRYPOINT +// * CMD +// * VOLUME +// * WORKDIR +// * LABEL +// * STOPSIGNAL +// * ONBUILD +func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:gocyclo + config := &ImageConfig{} + + for _, change := range changes { + // First, let's assume proper Dockerfile format - space + // separator between instruction and value + split := strings.SplitN(change, " ", 2) + + if len(split) != 2 { + split = strings.SplitN(change, "=", 2) + if len(split) != 2 { + return nil, errors.Errorf("invalid change %q - must be formatted as KEY VALUE", change) + } + } + + outerKey := strings.ToUpper(strings.TrimSpace(split[0])) + value := strings.TrimSpace(split[1]) + switch outerKey { + case "USER": + // Assume literal contents are the user. + if value == "" { + return nil, errors.Errorf("invalid change %q - must provide a value to USER", change) + } + config.User = value + case "EXPOSE": + // EXPOSE is either [portnum] or + // [portnum]/[proto] + // Protocol must be "tcp" or "udp" + splitPort := strings.Split(value, "/") + if len(splitPort) > 2 { + return nil, errors.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change) + } + portNum, err := strconv.Atoi(splitPort[0]) + if err != nil { + return nil, errors.Wrapf(err, "invalid change %q - EXPOSE port must be an integer", change) + } + if portNum > 65535 || portNum <= 0 { + return nil, errors.Errorf("invalid change %q - EXPOSE port must be a valid port number", change) + } + proto := "tcp" + if len(splitPort) > 1 { + testProto := strings.ToLower(splitPort[1]) + switch testProto { + case "tcp", "udp": + proto = testProto + default: + return nil, errors.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change) + } + } + if config.ExposedPorts == nil { + config.ExposedPorts = make(map[string]struct{}) + } + config.ExposedPorts[fmt.Sprintf("%d/%s", portNum, proto)] = struct{}{} + case "ENV": + // Format is either: + // ENV key=value + // ENV key=value key=value ... + // ENV key value + // Both keys and values can be surrounded by quotes to group them. + // For now: we only support key=value. + // We will attempt to strip quotation marks if present. + + var key, val string + + splitEnv := strings.SplitN(value, "=", 2) + key = splitEnv[0] + // We do need a key + if key == "" { + return nil, errors.Errorf("invalid change %q - ENV must have at least one argument", change) + } + // Perfectly valid to not have a value + if len(splitEnv) == 2 { + val = splitEnv[1] + } + + if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) { + key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`) + } + if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) { + val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`) + } + config.Env = append(config.Env, fmt.Sprintf("%s=%s", key, val)) + case "ENTRYPOINT": + // Two valid forms. + // First, JSON array. + // Second, not a JSON array - we interpret this as an + // argument to `sh -c`, unless empty, in which case we + // just use a blank entrypoint. + testUnmarshal := []string{} + if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil { + // It isn't valid JSON, so assume it's an + // argument to sh -c if not empty. + if value != "" { + config.Entrypoint = []string{"/bin/sh", "-c", value} + } else { + config.Entrypoint = []string{} + } + } else { + // Valid JSON + config.Entrypoint = testUnmarshal + } + case "CMD": + // Same valid forms as entrypoint. + // However, where ENTRYPOINT assumes that 'ENTRYPOINT ' + // means no entrypoint, CMD assumes it is 'sh -c' with + // no third argument. + testUnmarshal := []string{} + if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil { + // It isn't valid JSON, so assume it's an + // argument to sh -c. + // Only include the value if it's not empty. + config.Cmd = []string{"/bin/sh", "-c"} + if value != "" { + config.Cmd = append(config.Cmd, value) + } + } else { + // Valid JSON + config.Cmd = testUnmarshal + } + case "VOLUME": + // Either a JSON array or a set of space-separated + // paths. + // Acts rather similar to ENTRYPOINT and CMD, but always + // appends rather than replacing, and no sh -c prepend.
+			testUnmarshal := []string{}
+			if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
+				// Not valid JSON, so split on spaces
+				testUnmarshal = strings.Split(value, " ")
+			}
+			if len(testUnmarshal) == 0 {
+				return nil, errors.Errorf("invalid change %q - must provide at least one argument to VOLUME", change)
+			}
+			for _, vol := range testUnmarshal {
+				if vol == "" {
+					return nil, errors.Errorf("invalid change %q - VOLUME paths must not be empty", change)
+				}
+				if config.Volumes == nil {
+					config.Volumes = make(map[string]struct{})
+				}
+				config.Volumes[vol] = struct{}{}
+			}
+		case "WORKDIR":
+			// This can be passed multiple times.
+			// Each successive invocation is treated as relative to
+			// the previous one - so WORKDIR /A, WORKDIR b,
+			// WORKDIR c results in /A/b/c
+			// Just need to check it's not empty...
+			if value == "" {
+				return nil, errors.Errorf("invalid change %q - must provide a non-empty WORKDIR", change)
+			}
+			config.WorkingDir = filepath.Join(config.WorkingDir, value)
+		case "LABEL":
+			// Same general idea as ENV, but we no longer allow " "
+			// as a separator.
+			// We didn't do that for ENV either, so nice and easy.
+			// Potentially problematic: LABEL might theoretically
+			// allow an = in the key? If people really do this, we
+			// may need to investigate more advanced parsing.
+			var (
+				key, val string
+			)
+
+			splitLabel := strings.SplitN(value, "=", 2)
+			// Unlike ENV, LABEL must have a value
+			if len(splitLabel) != 2 {
+				return nil, errors.Errorf("invalid change %q - LABEL must be formatted key=value", change)
+			}
+			key = splitLabel[0]
+			val = splitLabel[1]
+
+			if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) {
+				key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`)
+			}
+			if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) {
+				val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`)
+			}
+			// Check key after we strip quotations
+			if key == "" {
+				return nil, errors.Errorf("invalid change %q - LABEL must have a non-empty key", change)
+			}
+			if config.Labels == nil {
+				config.Labels = make(map[string]string)
+			}
+			config.Labels[key] = val
+		case "STOPSIGNAL":
+			// Check the provided signal for validity.
+			killSignal, err := signal.ParseSignal(value)
+			if err != nil {
+				return nil, errors.Wrapf(err, "invalid change %q - STOPSIGNAL must be given a valid signal", change)
+			}
+			config.StopSignal = fmt.Sprintf("%d", killSignal)
+		case "ONBUILD":
+			// Onbuild always appends.
+			if value == "" {
+				return nil, errors.Errorf("invalid change %q - ONBUILD must be given an argument", change)
+			}
+			config.OnBuild = append(config.OnBuild, value)
+		default:
+			return nil, errors.Errorf("invalid change %q - invalid instruction %s", change, outerKey)
+		}
+	}
+
+	return config, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/image_tree.go b/vendor/github.com/containers/common/libimage/image_tree.go
new file mode 100644
index 00000000000..d48aeeada34
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/image_tree.go
@@ -0,0 +1,117 @@
+package libimage
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/disiqueira/gotree/v3"
+	"github.com/docker/go-units"
+)
+
+// Tree generates a tree for the specified image and its layers. Use
+// `traverseChildren` to traverse the layers of all children. By default, only
+// layers of the image are printed.
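+//
+// A minimal usage sketch (illustrative only, not upstream documentation);
+// `img` is assumed to be an *Image previously looked up via a Runtime:
+//
+//	treeString, err := img.Tree(false) // false: print only this image's layers
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Print(treeString)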
+func (i *Image) Tree(traverseChildren bool) (string, error) {
+	// NOTE: a string builder prevents us from copying too much data around
+	// and lets us compile the string when and where needed.
+	sb := &strings.Builder{}
+
+	// First print the pretty header for the target image.
+	size, err := i.Size()
+	if err != nil {
+		return "", err
+	}
+	repoTags, err := i.RepoTags()
+	if err != nil {
+		return "", err
+	}
+
+	fmt.Fprintf(sb, "Image ID: %s\n", i.ID()[:12])
+	fmt.Fprintf(sb, "Tags: %s\n", repoTags)
+	fmt.Fprintf(sb, "Size: %v\n", units.HumanSizeWithPrecision(float64(size), 4))
+	if i.TopLayer() != "" {
+		fmt.Fprintf(sb, "Image Layers")
+	} else {
+		fmt.Fprintf(sb, "No Image Layers")
+	}
+
+	layerTree, err := i.runtime.layerTree()
+	if err != nil {
+		return "", err
+	}
+	imageNode := layerTree.node(i.TopLayer())
+
+	// Traverse the entire tree down to all children.
+	if traverseChildren {
+		tree := gotree.New(sb.String())
+		if err := imageTreeTraverseChildren(imageNode, tree); err != nil {
+			return "", err
+		}
+		return tree.Print(), nil
+	}
+
+	// Walk all layers of the image and assemble their data. Note that the
+	// tree is constructed in reverse order to remain backwards compatible
+	// with Podman.
+	contents := []string{}
+	for parentNode := imageNode; parentNode != nil; parentNode = parentNode.parent {
+		if parentNode.layer == nil {
+			break // we're done
+		}
+		var tags string
+		repoTags, err := parentNode.repoTags()
+		if err != nil {
+			return "", err
+		}
+		if len(repoTags) > 0 {
+			tags = fmt.Sprintf(" Top Layer of: %s", repoTags)
+		}
+		content := fmt.Sprintf("ID: %s Size: %7v%s", parentNode.layer.ID[:12], units.HumanSizeWithPrecision(float64(parentNode.layer.UncompressedSize), 4), tags)
+		contents = append(contents, content)
+	}
+	contents = append(contents, sb.String())
+
+	tree := gotree.New(contents[len(contents)-1])
+	for i := len(contents) - 2; i >= 0; i-- {
+		tree.Add(contents[i])
+	}
+
+	return tree.Print(), nil
+}
+
+func imageTreeTraverseChildren(node *layerNode, parent gotree.Tree) error {
+	if node.layer == nil {
+		return nil
+	}
+
+	var tags string
+	repoTags, err := node.repoTags()
+	if err != nil {
+		return err
+	}
+	if len(repoTags) > 0 {
+		tags = fmt.Sprintf(" Top Layer of: %s", repoTags)
+	}
+
+	content := fmt.Sprintf("ID: %s Size: %7v%s", node.layer.ID[:12], units.HumanSizeWithPrecision(float64(node.layer.UncompressedSize), 4), tags)
+
+	var newTree gotree.Tree
+	if node.parent == nil || len(node.parent.children) <= 1 {
+		// No parent or no siblings, so we can go linear.
+		parent.Add(content)
+		newTree = parent
+	} else {
+		// Each sibling gets a new tree, so we can branch.
+		newTree = gotree.New(content)
+		parent.AddTree(newTree)
+	}
+
+	for i := range node.children {
+		child := node.children[i]
+		if err := imageTreeTraverseChildren(child, newTree); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go
new file mode 100644
index 00000000000..3db392784e9
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/import.go
@@ -0,0 +1,128 @@
+package libimage
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"os"
+
+	"github.com/containers/common/pkg/download"
+	storageTransport "github.com/containers/image/v5/storage"
+	tarballTransport "github.com/containers/image/v5/tarball"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// ImportOptions allow for customizing image imports.
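+//
+// A hedged sketch of how the options might be populated; the tag and the
+// change instructions below are made-up examples:
+//
+//	opts := &ImportOptions{
+//		Changes:       []string{"ENV FOO=bar", "CMD [\"/bin/sh\"]"},
+//		CommitMessage: "imported from tarball",
+//		Tag:           "localhost/imported:latest",
+//	}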
+type ImportOptions struct {
+	CopyOptions
+
+	// Apply the specified changes to the created image. Please refer to
+	// `ImageConfigFromChanges` for supported change instructions.
+	Changes []string
+	// Set the commit message as a comment in the created image's history.
+	CommitMessage string
+	// Tag the imported image with this value.
+	Tag string
+	// Overwrite OS of imported image.
+	OS string
+	// Overwrite Arch of imported image.
+	Arch string
+	// Overwrite Variant of imported image.
+	Variant string
+}
+
+// Import imports a custom tarball at the specified path. Returns the name of
+// the imported image.
+func (r *Runtime) Import(ctx context.Context, path string, options *ImportOptions) (string, error) {
+	logrus.Debugf("Importing image from %q", path)
+
+	if options == nil {
+		options = &ImportOptions{}
+	}
+
+	ic := v1.ImageConfig{}
+	if len(options.Changes) > 0 {
+		config, err := ImageConfigFromChanges(options.Changes)
+		if err != nil {
+			return "", err
+		}
+		ic = config.ImageConfig
+	}
+
+	history := []v1.History{
+		{Comment: options.CommitMessage},
+	}
+
+	config := v1.Image{
+		Config:       ic,
+		History:      history,
+		OS:           options.OS,
+		Architecture: options.Arch,
+		Variant:      options.Variant,
+	}
+
+	u, err := url.ParseRequestURI(path)
+	if err == nil && u.Scheme != "" {
+		// If source is a URL, download the file.
+		fmt.Printf("Downloading from %q\n", path)
+		file, err := download.FromURL(r.systemContext.BigFilesTemporaryDir, path)
+		if err != nil {
+			return "", err
+		}
+		defer os.Remove(file)
+		path = file
+	} else if path == "-" {
+		// "-" special cases stdin
+		path = os.Stdin.Name()
+	}
+
+	srcRef, err := tarballTransport.Transport.ParseReference(path)
+	if err != nil {
+		return "", err
+	}
+
+	updater, ok := srcRef.(tarballTransport.ConfigUpdater)
+	if !ok {
+		return "", errors.New("unexpected type, a tarball reference should implement tarball.ConfigUpdater")
+	}
+	annotations := make(map[string]string)
+	if err := updater.ConfigUpdate(config, annotations); err != nil {
+		return "", err
+	}
+
+	id, err := getImageID(ctx, srcRef, r.systemContextCopy())
+	if err != nil {
+		return "", err
+	}
+
+	destRef, err := storageTransport.Transport.ParseStoreReference(r.store, id)
+	if err != nil {
+		return "", err
+	}
+
+	c, err := r.newCopier(&options.CopyOptions)
+	if err != nil {
+		return "", err
+	}
+	defer c.close()
+
+	if _, err := c.copy(ctx, srcRef, destRef); err != nil {
+		return "", err
+	}
+
+	// Strip the leading @ off the id.
+	name := id[1:]
+
+	// If requested, tag the imported image.
+	if options.Tag != "" {
+		image, _, err := r.LookupImage(name, nil)
+		if err != nil {
+			return "", errors.Wrap(err, "looking up imported image")
+		}
+		if err := image.Tag(options.Tag); err != nil {
+			return "", err
+		}
+	}
+
+	return "sha256:" + name, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/inspect.go b/vendor/github.com/containers/common/libimage/inspect.go
new file mode 100644
index 00000000000..05d60edfc1e
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/inspect.go
@@ -0,0 +1,232 @@
+package libimage
+
+import (
+	"context"
+	"time"
+
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+)
+
+// ImageData contains the inspected data of an image.
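+//
+// It is typically obtained via (*Image).Inspect; for instance (sketch):
+//
+//	data, err := img.Inspect(ctx, &InspectOptions{WithSize: true})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(data.ID, data.Size)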
+type ImageData struct { + ID string `json:"Id"` + Digest digest.Digest `json:"Digest"` + RepoTags []string `json:"RepoTags"` + RepoDigests []string `json:"RepoDigests"` + Parent string `json:"Parent"` + Comment string `json:"Comment"` + Created *time.Time `json:"Created"` + Config *ociv1.ImageConfig `json:"Config"` + Version string `json:"Version"` + Author string `json:"Author"` + Architecture string `json:"Architecture"` + Os string `json:"Os"` + Size int64 `json:"Size"` + VirtualSize int64 `json:"VirtualSize"` + GraphDriver *DriverData `json:"GraphDriver"` + RootFS *RootFS `json:"RootFS"` + Labels map[string]string `json:"Labels"` + Annotations map[string]string `json:"Annotations"` + ManifestType string `json:"ManifestType"` + User string `json:"User"` + History []ociv1.History `json:"History"` + NamesHistory []string `json:"NamesHistory"` + HealthCheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"` +} + +// DriverData includes data on the storage driver of the image. +type DriverData struct { + Name string `json:"Name"` + Data map[string]string `json:"Data"` +} + +// RootFS includes data on the root filesystem of the image. +type RootFS struct { + Type string `json:"Type"` + Layers []digest.Digest `json:"Layers"` +} + +// InspectOptions allow for customizing inspecting images. +type InspectOptions struct { + // Compute the size of the image (expensive). + WithSize bool + // Compute the parent of the image (expensive). + WithParent bool +} + +// Inspect inspects the image. +func (i *Image) Inspect(ctx context.Context, options *InspectOptions) (*ImageData, error) { + logrus.Debugf("Inspecting image %s", i.ID()) + + if options == nil { + options = &InspectOptions{} + } + + if i.cached.completeInspectData != nil { + if options.WithSize && i.cached.completeInspectData.Size == int64(-1) { + size, err := i.Size() + if err != nil { + return nil, err + } + i.cached.completeInspectData.Size = size + } + if options.WithParent && i.cached.completeInspectData.Parent == "" { + parentImage, err := i.Parent(ctx) + if err != nil { + return nil, err + } + if parentImage != nil { + i.cached.completeInspectData.Parent = parentImage.ID() + } + } + return i.cached.completeInspectData, nil + } + + // First assemble data that does not depend on the format of the image. 
+ info, err := i.inspectInfo(ctx) + if err != nil { + return nil, err + } + ociImage, err := i.toOCI(ctx) + if err != nil { + return nil, err + } + + repoTags, err := i.RepoTags() + if err != nil { + return nil, err + } + repoDigests, err := i.RepoDigests() + if err != nil { + return nil, err + } + driverData, err := i.driverData() + if err != nil { + return nil, err + } + + size := int64(-1) + if options.WithSize { + size, err = i.Size() + if err != nil { + return nil, err + } + } + + data := &ImageData{ + ID: i.ID(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Created: ociImage.Created, + Author: ociImage.Author, + Architecture: ociImage.Architecture, + Os: ociImage.OS, + Config: &ociImage.Config, + Version: info.DockerVersion, + Size: size, + VirtualSize: size, // TODO: they should be different (inherited from Podman) + Digest: i.Digest(), + Labels: info.Labels, + RootFS: &RootFS{ + Type: ociImage.RootFS.Type, + Layers: ociImage.RootFS.DiffIDs, + }, + GraphDriver: driverData, + User: ociImage.Config.User, + History: ociImage.History, + NamesHistory: i.NamesHistory(), + } + + if options.WithParent { + parentImage, err := i.Parent(ctx) + if err != nil { + return nil, err + } + if parentImage != nil { + data.Parent = parentImage.ID() + } + } + + // Determine the format of the image. How we determine certain data + // depends on the format (e.g., Docker v2s2, OCI v1). + src, err := i.source(ctx) + if err != nil { + return nil, err + } + manifestRaw, manifestType, err := src.GetManifest(ctx, nil) + if err != nil { + return nil, err + } + + data.ManifestType = manifestType + + switch manifestType { + // OCI image + case ociv1.MediaTypeImageManifest: + var ociManifest ociv1.Manifest + if err := json.Unmarshal(manifestRaw, &ociManifest); err != nil { + return nil, err + } + data.Annotations = ociManifest.Annotations + if len(ociImage.History) > 0 { + data.Comment = ociImage.History[0].Comment + } + + // Docker image + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema2MediaType: + rawConfig, err := i.rawConfigBlob(ctx) + if err != nil { + return nil, err + } + var dockerManifest manifest.Schema2V1Image + if err := json.Unmarshal(rawConfig, &dockerManifest); err != nil { + return nil, err + } + data.Comment = dockerManifest.Comment + // NOTE: Health checks may be listed in the container config or + // the config. + data.HealthCheck = dockerManifest.ContainerConfig.Healthcheck + if data.HealthCheck == nil { + data.HealthCheck = dockerManifest.Config.Healthcheck + } + } + + if data.Annotations == nil { + // Podman compat + data.Annotations = make(map[string]string) + } + + i.cached.completeInspectData = data + + return data, nil +} + +// inspectInfo returns the image inspect info. 
+func (i *Image) inspectInfo(ctx context.Context) (*types.ImageInspectInfo, error) {
+	if i.cached.partialInspectData != nil {
+		return i.cached.partialInspectData, nil
+	}
+
+	ref, err := i.StorageReference()
+	if err != nil {
+		return nil, err
+	}
+
+	img, err := ref.NewImage(ctx, i.runtime.systemContextCopy())
+	if err != nil {
+		return nil, err
+	}
+	defer img.Close()
+
+	data, err := img.Inspect(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	i.cached.partialInspectData = data
+	return data, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go
new file mode 100644
index 00000000000..05f21531b0a
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/layer_tree.go
@@ -0,0 +1,299 @@
+package libimage
+
+import (
+	"context"
+
+	"github.com/containers/storage"
+	ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+)
+
+// layerTree is an internal representation of local layers.
+type layerTree struct {
+	// nodes is the actual layer tree with layer IDs being keys.
+	nodes map[string]*layerNode
+	// ociCache is a cache for Image.ID -> OCI Image. Translations are done
+	// on-demand.
+	ociCache map[string]*ociv1.Image
+	// emptyImages do not have any top-layer so we cannot create a
+	// *layerNode for them.
+	emptyImages []*Image
+}
+
+// node returns a layerNode for the specified layerID.
+func (t *layerTree) node(layerID string) *layerNode {
+	node, exists := t.nodes[layerID]
+	if !exists {
+		node = &layerNode{}
+		t.nodes[layerID] = node
+	}
+	return node
+}
+
+// toOCI returns an OCI image for the specified image.
+func (t *layerTree) toOCI(ctx context.Context, i *Image) (*ociv1.Image, error) {
+	var err error
+	oci, exists := t.ociCache[i.ID()]
+	if !exists {
+		oci, err = i.toOCI(ctx)
+		if err == nil {
+			t.ociCache[i.ID()] = oci
+		}
+	}
+	return oci, err
+}
+
+// layerNode is a node in a layerTree. Its ID is the key in a layerTree.
+type layerNode struct {
+	children []*layerNode
+	images   []*Image
+	parent   *layerNode
+	layer    *storage.Layer
+}
+
+// repoTags assembles the repo tags of all images of the layer node.
+func (l *layerNode) repoTags() ([]string, error) {
+	orderedTags := []string{}
+	visitedTags := make(map[string]bool)
+
+	for _, image := range l.images {
+		repoTags, err := image.RepoTags()
+		if err != nil {
+			return nil, err
+		}
+		for _, tag := range repoTags {
+			if _, visited := visitedTags[tag]; visited {
+				continue
+			}
+			visitedTags[tag] = true
+			orderedTags = append(orderedTags, tag)
+		}
+	}
+
+	return orderedTags, nil
+}
+
+// layerTree extracts a layerTree from the layers in the local storage and
+// relates them to the specified images.
+func (r *Runtime) layerTree() (*layerTree, error) {
+	layers, err := r.store.Layers()
+	if err != nil {
+		return nil, err
+	}
+
+	images, err := r.ListImages(context.Background(), nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	tree := layerTree{
+		nodes:    make(map[string]*layerNode),
+		ociCache: make(map[string]*ociv1.Image),
+	}
+
+	// First build a tree purely based on layer information.
+	for i := range layers {
+		node := tree.node(layers[i].ID)
+		node.layer = &layers[i]
+		if layers[i].Parent == "" {
+			continue
+		}
+		parent := tree.node(layers[i].Parent)
+		node.parent = parent
+		parent.children = append(parent.children, node)
+	}
+
+	// Now assign the images to each (top) layer.
+	for i := range images {
+		img := images[i] // do not leak loop variable outside the scope
+		topLayer := img.TopLayer()
+		if topLayer == "" {
+			tree.emptyImages = append(tree.emptyImages, img)
+			continue
+		}
+		node, exists := tree.nodes[topLayer]
+		if !exists {
+			// Note: erroring out in this case turned out to have
+			// been a mistake. Users may not be able to recover, so
+			// we now log a warning guiding them to resolve the
+			// issue and treat the condition as non-fatal.
+			logrus.Warnf("Top layer %s of image %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", topLayer, img.ID())
+			continue
+		}
+		node.images = append(node.images, img)
+	}
+
+	return &tree, nil
+}
+
+// children returns the child images of parent. Child images are images with
+// either the same top layer as parent or parent being the true parent layer.
+// Furthermore, the history of the parent and child images must match with the
+// parent having one history item less. If all is true, all images are
+// returned. Otherwise, the first image is returned. Note that manifest lists
+// do not have children.
+func (t *layerTree) children(ctx context.Context, parent *Image, all bool) ([]*Image, error) {
+	if parent.TopLayer() == "" {
+		if isManifestList, _ := parent.IsManifestList(ctx); isManifestList {
+			return nil, nil
+		}
+	}
+
+	parentID := parent.ID()
+	parentOCI, err := t.toOCI(ctx, parent)
+	if err != nil {
+		return nil, err
+	}
+
+	// checkParent returns true if child and parent are in a parent-child
+	// relation.
+	checkParent := func(child *Image) (bool, error) {
+		if parentID == child.ID() {
+			return false, nil
+		}
+		childOCI, err := t.toOCI(ctx, child)
+		if err != nil {
+			return false, err
+		}
+		// History check.
+		return areParentAndChild(parentOCI, childOCI), nil
+	}
+
+	var children []*Image
+
+	// Empty images are special in that they do not have any physical layer
+	// but yet can have a parent-child relation. Hence, compare the
+	// "parent" image to all other known empty images.
+	if parent.TopLayer() == "" {
+		for i := range t.emptyImages {
+			empty := t.emptyImages[i]
+			isParent, err := checkParent(empty)
+			if err != nil {
+				return nil, err
+			}
+			if isParent {
+				children = append(children, empty)
+				if !all {
+					break
+				}
+			}
+		}
+		return children, nil
+	}
+
+	parentNode, exists := t.nodes[parent.TopLayer()]
+	if !exists {
+		// Note: erroring out in this case turned out to have been a
+		// mistake. Users may not be able to recover, so we now log a
+		// warning guiding them to resolve the issue and treat the
+		// condition as non-fatal.
+		logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", parent.TopLayer())
+		return children, nil
+	}
+
+	// addChildrenFromNode adds child images of parent to children. Returns
+	// true if any image is a child of parent.
+	addChildrenFromNode := func(node *layerNode) (bool, error) {
+		foundChildren := false
+		for i, childImage := range node.images {
+			isChild, err := checkParent(childImage)
+			if err != nil {
+				return foundChildren, err
+			}
+			if isChild {
+				foundChildren = true
+				children = append(children, node.images[i])
+				if all {
+					return foundChildren, nil
+				}
+			}
+		}
+		return foundChildren, nil
+	}
+
+	// First check images where parent's top layer is also the parent
+	// layer.
+	for _, childNode := range parentNode.children {
+		found, err := addChildrenFromNode(childNode)
+		if err != nil {
+			return nil, err
+		}
+		if found && all {
+			return children, nil
+		}
+	}
+
+	// Now check images with the same top layer.
+	if _, err := addChildrenFromNode(parentNode); err != nil {
+		return nil, err
+	}
+
+	return children, nil
+}
+
+// parent returns the parent image or nil if no parent image could be found.
+// Note that manifest lists do not have parents.
+func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) {
+	if child.TopLayer() == "" {
+		if isManifestList, _ := child.IsManifestList(ctx); isManifestList {
+			return nil, nil
+		}
+	}
+
+	childID := child.ID()
+	childOCI, err := t.toOCI(ctx, child)
+	if err != nil {
+		return nil, err
+	}
+
+	// Empty images are special in that they do not have any physical layer
+	// but yet can have a parent-child relation. Hence, compare the
+	// "child" image to all other known empty images.
+	if child.TopLayer() == "" {
+		for _, empty := range t.emptyImages {
+			if childID == empty.ID() {
+				continue
+			}
+			emptyOCI, err := t.toOCI(ctx, empty)
+			if err != nil {
+				return nil, err
+			}
+			// History check.
+			if areParentAndChild(emptyOCI, childOCI) {
+				return empty, nil
+			}
+		}
+		return nil, nil
+	}
+
+	node, exists := t.nodes[child.TopLayer()]
+	if !exists {
+		// Note: erroring out in this case turned out to have been a
+		// mistake. Users may not be able to recover, so we now log a
+		// warning guiding them to resolve the issue and treat the
+		// condition as non-fatal.
+		logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", child.TopLayer())
+		return nil, nil
+	}
+
+	// Check images from the parent node (i.e., parent layer) and images
+	// with the same layer (i.e., same top layer).
+	images := node.images
+	if node.parent != nil {
+		images = append(images, node.parent.images...)
+	}
+	for _, parent := range images {
+		if parent.ID() == childID {
+			continue
+		}
+		parentOCI, err := t.toOCI(ctx, parent)
+		if err != nil {
+			return nil, err
+		}
+		// History check.
+		if areParentAndChild(parentOCI, childOCI) {
+			return parent, nil
+		}
+	}
+
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go
new file mode 100644
index 00000000000..4dfac710654
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/load.go
@@ -0,0 +1,133 @@
+package libimage
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"time"
+
+	dirTransport "github.com/containers/image/v5/directory"
+	dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
+	ociArchiveTransport "github.com/containers/image/v5/oci/archive"
+	ociTransport "github.com/containers/image/v5/oci/layout"
+	"github.com/containers/image/v5/types"
+	"github.com/sirupsen/logrus"
+)
+
+// LoadOptions allow for customizing image loads.
+type LoadOptions struct {
+	CopyOptions
+}
+
+// Load loads one or more images (depending on the transport) from the
+// specified path. The path may point to an image in one of the following
+// transports: oci, oci-archive, dir, docker-archive.
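+//
+// A usage sketch (illustrative; `rt` is an assumed *Runtime and the path is
+// made up):
+//
+//	names, err := rt.Load(ctx, "/tmp/alpine.tar", nil)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("loaded images:", names)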
+func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ([]string, error) {
+	logrus.Debugf("Loading image from %q", path)
+
+	if r.eventChannel != nil {
+		defer r.writeEvent(&Event{ID: "", Name: path, Time: time.Now(), Type: EventTypeImageLoad})
+	}
+
+	if options == nil {
+		options = &LoadOptions{}
+	}
+
+	var loadErrors []error
+
+	for _, f := range []func() ([]string, string, error){
+		// OCI
+		func() ([]string, string, error) {
+			logrus.Debugf("-> Attempting to load %q as an OCI directory", path)
+			ref, err := ociTransport.NewReference(path, "")
+			if err != nil {
+				return nil, ociTransport.Transport.Name(), err
+			}
+			images, err := r.copyFromDefault(ctx, ref, &options.CopyOptions)
+			return images, ociTransport.Transport.Name(), err
+		},
+
+		// OCI-ARCHIVE
+		func() ([]string, string, error) {
+			logrus.Debugf("-> Attempting to load %q as an OCI archive", path)
+			ref, err := ociArchiveTransport.NewReference(path, "")
+			if err != nil {
+				return nil, ociArchiveTransport.Transport.Name(), err
+			}
+			images, err := r.copyFromDefault(ctx, ref, &options.CopyOptions)
+			return images, ociArchiveTransport.Transport.Name(), err
+		},
+
+		// DOCKER-ARCHIVE
+		func() ([]string, string, error) {
+			logrus.Debugf("-> Attempting to load %q as a Docker archive", path)
+			ref, err := dockerArchiveTransport.ParseReference(path)
+			if err != nil {
+				return nil, dockerArchiveTransport.Transport.Name(), err
+			}
+			images, err := r.loadMultiImageDockerArchive(ctx, ref, &options.CopyOptions)
+			return images, dockerArchiveTransport.Transport.Name(), err
+		},
+
+		// DIR
+		func() ([]string, string, error) {
+			logrus.Debugf("-> Attempting to load %q as a Docker dir", path)
+			ref, err := dirTransport.NewReference(path)
+			if err != nil {
+				return nil, dirTransport.Transport.Name(), err
+			}
+			images, err := r.copyFromDefault(ctx, ref, &options.CopyOptions)
+			return images, dirTransport.Transport.Name(), err
+		},
+	} {
+		loadedImages, transportName, err := f()
+		if err == nil {
+			return loadedImages, nil
+		}
+		logrus.Debugf("Error loading %s (%s): %v", path, transportName, err)
+		loadErrors = append(loadErrors, fmt.Errorf("%s: %v", transportName, err))
+	}
+
+	// Give a decent error message if nothing above worked.
+	loadError := fmt.Errorf("payload does not match any of the supported image formats:")
+	for _, err := range loadErrors {
+		loadError = fmt.Errorf("%v\n * %v", loadError, err)
+	}
+
+	return nil, loadError
+}
+
+// loadMultiImageDockerArchive loads the docker archive specified by ref. In
+// case the path@reference notation was used, only the specified image will be
+// loaded. Otherwise, all images will be loaded.
+func (r *Runtime) loadMultiImageDockerArchive(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) {
+	// If we cannot stat the path, it either does not exist OR the correct
+	// syntax to reference an image within the archive was used, so we
+	// should fall back to copying the archive directly.
+ path := ref.StringWithinTransport() + if _, err := os.Stat(path); err != nil { + return r.copyFromDockerArchive(ctx, ref, options) + } + + reader, err := dockerArchiveTransport.NewReader(r.systemContextCopy(), path) + if err != nil { + return nil, err + } + + refLists, err := reader.List() + if err != nil { + return nil, err + } + + var copiedImages []string + for _, list := range refLists { + for _, listRef := range list { + names, err := r.copyFromDockerArchiveReaderReference(ctx, reader, listRef, options) + if err != nil { + return nil, err + } + copiedImages = append(copiedImages, names...) + } + } + + return copiedImages, nil +} diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go new file mode 100644 index 00000000000..4e8959004bf --- /dev/null +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -0,0 +1,401 @@ +package libimage + +import ( + "context" + "fmt" + "time" + + "github.com/containers/common/libimage/manifests" + imageCopy "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// NOTE: the abstractions and APIs here are a first step to further merge +// `libimage/manifests` into `libimage`. + +// ErrNotAManifestList indicates that an image was found in the local +// containers storage but it is not a manifest list as requested. +var ErrNotAManifestList = errors.New("image is not a manifest list") + +// ManifestList represents a manifest list (Docker) or an image index (OCI) in +// the local containers storage. +type ManifestList struct { + // NOTE: the *List* suffix is intentional as the term "manifest" is + // used ambiguously across the ecosystem. It may refer to the (JSON) + // manifest of an ordinary image OR to a manifest *list* (Docker) or to + // image index (OCI). + // It's a bit more work when typing but without ambiguity. + + // The underlying image in the containers storage. + image *Image + + // The underlying manifest list. + list manifests.List +} + +// ID returns the ID of the manifest list. +func (m *ManifestList) ID() string { + return m.image.ID() +} + +// CreateManifestList creates a new empty manifest list with the specified +// name. +func (r *Runtime) CreateManifestList(name string) (*ManifestList, error) { + normalized, err := NormalizeName(name) + if err != nil { + return nil, err + } + + list := manifests.Create() + listID, err := list.SaveToImage(r.store, "", []string{normalized.String()}, manifest.DockerV2ListMediaType) + if err != nil { + return nil, err + } + + mList, err := r.LookupManifestList(listID) + if err != nil { + return nil, err + } + + return mList, nil +} + +// LookupManifestList looks up a manifest list with the specified name in the +// containers storage. 
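+//
+// A sketch of the create-then-lookup flow (the list name is an example only;
+// `rt` is an assumed *Runtime):
+//
+//	list, err := rt.CreateManifestList("localhost/mylist:latest")
+//	if err != nil {
+//		return err
+//	}
+//	list, err = rt.LookupManifestList("localhost/mylist:latest")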
+func (r *Runtime) LookupManifestList(name string) (*ManifestList, error) {
+	image, list, err := r.lookupManifestList(name)
+	if err != nil {
+		return nil, err
+	}
+	return &ManifestList{image: image, list: list}, nil
+}
+
+func (r *Runtime) lookupManifestList(name string) (*Image, manifests.List, error) {
+	lookupOptions := &LookupImageOptions{
+		lookupManifest: true,
+	}
+	image, _, err := r.LookupImage(name, lookupOptions)
+	if err != nil {
+		return nil, nil, err
+	}
+	if err := image.reload(); err != nil {
+		return nil, nil, err
+	}
+	list, err := image.getManifestList()
+	if err != nil {
+		return nil, nil, err
+	}
+	return image, list, nil
+}
+
+// ToManifestList converts the image into a manifest list. An error is
+// returned if the image is not a manifest list.
+func (i *Image) ToManifestList() (*ManifestList, error) {
+	list, err := i.getManifestList()
+	if err != nil {
+		return nil, err
+	}
+	return &ManifestList{image: i, list: list}, nil
+}
+
+// LookupInstance looks up an instance of the manifest list matching the
+// specified platform. The local machine's platform is used if left empty.
+func (m *ManifestList) LookupInstance(ctx context.Context, architecture, os, variant string) (*Image, error) {
+	sys := m.image.runtime.systemContextCopy()
+	if architecture != "" {
+		sys.ArchitectureChoice = architecture
+	}
+	if os != "" {
+		sys.OSChoice = os
+	}
+	if variant != "" {
+		sys.VariantChoice = variant
+	}
+
+	// Now look at the *manifest* and select a matching instance.
+	rawManifest, manifestType, err := m.image.Manifest(ctx)
+	if err != nil {
+		return nil, err
+	}
+	list, err := manifest.ListFromBlob(rawManifest, manifestType)
+	if err != nil {
+		return nil, err
+	}
+	instanceDigest, err := list.ChooseInstance(sys)
+	if err != nil {
+		return nil, err
+	}
+
+	allImages, err := m.image.runtime.ListImages(ctx, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, image := range allImages {
+		for _, imageDigest := range append(image.Digests(), image.Digest()) {
+			if imageDigest == instanceDigest {
+				return image, nil
+			}
+		}
+	}
+
+	return nil, errors.Wrapf(storage.ErrImageUnknown, "could not find image instance %s of manifest list %s in local containers storage", instanceDigest, m.ID())
+}
+
+// saveAndReload saves the manifest list and reloads it from storage with the
+// new ID.
+func (m *ManifestList) saveAndReload() error {
+	newID, err := m.list.SaveToImage(m.image.runtime.store, m.image.ID(), nil, "")
+	if err != nil {
+		return err
+	}
+
+	// Make sure to reload the image from the containers storage to fetch
+	// the latest data (e.g., new or deleted digests).
+	if err := m.image.reload(); err != nil {
+		return err
+	}
+	image, list, err := m.image.runtime.lookupManifestList(newID)
+	if err != nil {
+		return err
+	}
+	m.image = image
+	m.list = list
+	return nil
+}
+
+// getManifestList is a helper to obtain a manifest list.
+func (i *Image) getManifestList() (manifests.List, error) {
+	_, list, err := manifests.LoadFromImage(i.runtime.store, i.ID())
+	return list, err
+}
+
+// IsManifestList returns true if the image is a manifest list (Docker) or an
+// image index (OCI). This information may be critical to make certain
+// execution paths more robust (e.g., suppress certain errors).
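+//
+// For example (sketch):
+//
+//	isList, err := img.IsManifestList(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	if isList {
+//		// safe to call img.ToManifestList() and operate on the list
+//	}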
+func (i *Image) IsManifestList(ctx context.Context) (bool, error) {
+	ref, err := i.StorageReference()
+	if err != nil {
+		return false, err
+	}
+	imgRef, err := ref.NewImageSource(ctx, i.runtime.systemContextCopy())
+	if err != nil {
+		return false, err
+	}
+	_, manifestType, err := imgRef.GetManifest(ctx, nil)
+	if err != nil {
+		return false, err
+	}
+	return manifest.MIMETypeIsMultiImage(manifestType), nil
+}
+
+// Inspect returns a dockerized version of the manifest list.
+func (m *ManifestList) Inspect() (*manifest.Schema2List, error) {
+	return m.list.Docker(), nil
+}
+
+// ManifestListAddOptions are options for adding images to a manifest list.
+type ManifestListAddOptions struct {
+	// Add all images to the list if the to-be-added image itself is a
+	// manifest list.
+	All bool `json:"all"`
+	// containers-auth.json(5) file to use when authenticating against
+	// container registries.
+	AuthFilePath string
+	// Path to the certificates directory.
+	CertDirPath string
+	// Allow contacting registries over HTTP, or HTTPS with failed TLS
+	// verification. Note that this does not affect other TLS connections.
+	InsecureSkipTLSVerify types.OptionalBool
+	// Username to use when authenticating at a container registry.
+	Username string
+	// Password to use when authenticating at a container registry.
+	Password string
+}
+
+// Add adds one or more manifests to the manifest list and returns the digest
+// of the added instance.
+func (m *ManifestList) Add(ctx context.Context, name string, options *ManifestListAddOptions) (digest.Digest, error) {
+	if options == nil {
+		options = &ManifestListAddOptions{}
+	}
+
+	ref, err := alltransports.ParseImageName(name)
+	if err != nil {
+		withDocker := fmt.Sprintf("%s://%s", docker.Transport.Name(), name)
+		ref, err = alltransports.ParseImageName(withDocker)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	// Now massage in the copy-related options into the system context.
+	systemContext := m.image.runtime.systemContextCopy()
+	if options.AuthFilePath != "" {
+		systemContext.AuthFilePath = options.AuthFilePath
+	}
+	if options.CertDirPath != "" {
+		systemContext.DockerCertPath = options.CertDirPath
+	}
+	if options.InsecureSkipTLSVerify != types.OptionalBoolUndefined {
+		systemContext.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify
+		systemContext.OCIInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue
+		systemContext.DockerDaemonInsecureSkipTLSVerify = options.InsecureSkipTLSVerify == types.OptionalBoolTrue
+	}
+	if options.Username != "" {
+		systemContext.DockerAuthConfig = &types.DockerAuthConfig{
+			Username: options.Username,
+			Password: options.Password,
+		}
+	}
+
+	newDigest, err := m.list.Add(ctx, systemContext, ref, options.All)
+	if err != nil {
+		return "", err
+	}
+
+	// Write the changes to disk.
+	if err := m.saveAndReload(); err != nil {
+		return "", err
+	}
+	return newDigest, nil
+}
+
+// ManifestListAnnotateOptions are options for annotating a manifest list.
+type ManifestListAnnotateOptions struct {
+	// Add the specified annotations to the added image.
+	Annotations map[string]string
+	// Add the specified architecture to the added image.
+	Architecture string
+	// Add the specified features to the added image.
+	Features []string
+	// Add the specified OS to the added image.
+	OS string
+	// Add the specified OS features to the added image.
+	OSFeatures []string
+	// Add the specified OS version to the added image.
+	OSVersion string
+	// Add the specified variant to the added image.
+	Variant string
+}
+
+// AnnotateInstance annotates the image instance specified by `d` in the
+// manifest list.
+func (m *ManifestList) AnnotateInstance(d digest.Digest, options *ManifestListAnnotateOptions) error {
+	if options == nil {
+		return nil
+	}
+
+	if len(options.OS) > 0 {
+		if err := m.list.SetOS(d, options.OS); err != nil {
+			return err
+		}
+	}
+	if len(options.OSVersion) > 0 {
+		if err := m.list.SetOSVersion(d, options.OSVersion); err != nil {
+			return err
+		}
+	}
+	if len(options.Features) > 0 {
+		if err := m.list.SetFeatures(d, options.Features); err != nil {
+			return err
+		}
+	}
+	if len(options.OSFeatures) > 0 {
+		if err := m.list.SetOSFeatures(d, options.OSFeatures); err != nil {
+			return err
+		}
+	}
+	if len(options.Architecture) > 0 {
+		if err := m.list.SetArchitecture(d, options.Architecture); err != nil {
+			return err
+		}
+	}
+	if len(options.Variant) > 0 {
+		if err := m.list.SetVariant(d, options.Variant); err != nil {
+			return err
+		}
+	}
+	if len(options.Annotations) > 0 {
+		if err := m.list.SetAnnotations(&d, options.Annotations); err != nil {
+			return err
+		}
+	}
+
+	// Write the changes to disk.
+	if err := m.saveAndReload(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// RemoveInstance removes the instance specified by `d` from the manifest
+// list.
+func (m *ManifestList) RemoveInstance(d digest.Digest) error {
+	if err := m.list.Remove(d); err != nil {
+		return err
+	}
+
+	// Write the changes to disk.
+	if err := m.saveAndReload(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ManifestListPushOptions allow for customizing pushing a manifest list.
+type ManifestListPushOptions struct {
+	CopyOptions
+
+	// For tweaking the list selection.
+	ImageListSelection imageCopy.ImageListSelection
+	// Use when selecting only specific images.
+	Instances []digest.Digest
+}
+
+// Push pushes a manifest to the specified destination.
+func (m *ManifestList) Push(ctx context.Context, destination string, options *ManifestListPushOptions) (digest.Digest, error) {
+	if options == nil {
+		options = &ManifestListPushOptions{}
+	}
+
+	dest, err := alltransports.ParseImageName(destination)
+	if err != nil {
+		oldErr := err
+		dest, err = alltransports.ParseImageName("docker://" + destination)
+		if err != nil {
+			return "", oldErr
+		}
+	}
+
+	if m.image.runtime.eventChannel != nil {
+		defer m.image.runtime.writeEvent(&Event{ID: m.ID(), Name: destination, Time: time.Now(), Type: EventTypeImagePush})
+	}
+
+	// NOTE: we're using the logic in copier to create a proper
+	// types.SystemContext. This prevents us from having an error-prone
+	// code duplicate here.
+	copier, err := m.image.runtime.newCopier(&options.CopyOptions)
+	if err != nil {
+		return "", err
+	}
+	defer copier.close()
+
+	pushOptions := manifests.PushOptions{
+		Store:              m.image.runtime.store,
+		SystemContext:      copier.systemContext,
+		ImageListSelection: options.ImageListSelection,
+		Instances:          options.Instances,
+		ReportWriter:       options.Writer,
+		SignBy:             options.SignBy,
+		RemoveSignatures:   options.RemoveSignatures,
+		ManifestType:       options.ManifestMIMEType,
+	}
+
+	_, d, err := m.list.Push(ctx, dest, pushOptions)
+	return d, err
+}
diff --git a/vendor/github.com/containers/common/libimage/manifests/copy.go b/vendor/github.com/containers/common/libimage/manifests/copy.go
new file mode 100644
index 00000000000..578b64ca838
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/manifests/copy.go
@@ -0,0 +1,13 @@
+package manifests
+
+import (
+	"github.com/containers/image/v5/signature"
+)
+
+// storageAllowedPolicyScopes overrides the policy for local storage
+// to ensure that we can read images from it.
+var storageAllowedPolicyScopes = signature.PolicyTransportScopes{
+	"": []signature.PolicyRequirement{
+		signature.NewPRInsecureAcceptAnything(),
+	},
+}
diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go
new file mode 100644
index 00000000000..2624dee785a
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go
@@ -0,0 +1,426 @@
+package manifests
+
+import (
+	"context"
+	"encoding/json"
+	stderrors "errors"
+	"io"
+
+	"github.com/containers/common/pkg/manifests"
+	"github.com/containers/common/pkg/supplemented"
+	cp "github.com/containers/image/v5/copy"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/signature"
+	is "github.com/containers/image/v5/storage"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/transports/alltransports"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/lockfile"
+	digest "github.com/opencontainers/go-digest"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const instancesData = "instances.json"
+
+// LookupReferenceFunc returns an image reference based on the specified one.
+// The returned reference can return custom ImageSource or ImageDestination
+// objects which intercept or filter blobs, manifests, and signatures as
+// they are read and written.
+type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, error)
+
+// ErrListImageUnknown is returned when we attempt to create an image reference
+// for a List that has not yet been saved to an image.
+var ErrListImageUnknown = stderrors.New("unable to determine which image holds the manifest list")
+
+type list struct {
+	manifests.List
+	instances map[digest.Digest]string
+}
+
+// List is a manifest list or image index, either created using Create(), or
+// loaded from local storage using LoadFromImage().
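+//
+// A sketch of the typical flow (the reference name and media type below are
+// example values):
+//
+//	list := manifests.Create()
+//	instanceDigest, err := list.Add(ctx, sys, ref, true) // all instances of ref
+//	if err != nil {
+//		return err
+//	}
+//	imageID, err := list.SaveToImage(store, "", []string{"localhost/idx:latest"}, manifest.DockerV2ListMediaType)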
+type List interface {
+	manifests.List
+	SaveToImage(store storage.Store, imageID string, names []string, mimeType string) (string, error)
+	Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error)
+	Push(ctx context.Context, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error)
+	Add(ctx context.Context, sys *types.SystemContext, ref types.ImageReference, all bool) (digest.Digest, error)
+}
+
+// PushOptions includes various settings which are needed for pushing the
+// manifest list and its instances.
+type PushOptions struct {
+	Store              storage.Store
+	SystemContext      *types.SystemContext  // github.com/containers/image/types.SystemContext
+	ImageListSelection cp.ImageListSelection // set to either CopySystemImage, CopyAllImages, or CopySpecificImages
+	Instances          []digest.Digest       // instances to copy if ImageListSelection == CopySpecificImages
+	ReportWriter       io.Writer             // will be used to log the writing of the list and any blobs
+	SignBy             string                // fingerprint of GPG key to use to sign images
+	RemoveSignatures   bool                  // true to discard signatures in images
+	ManifestType       string                // the format to use when saving the list - possible options are oci, v2s1, and v2s2
+	SourceFilter       LookupReferenceFunc   // filter the list source
+}
+
+// Create creates a new, empty manifest list.
+func Create() List {
+	return &list{
+		List:      manifests.Create(),
+		instances: make(map[digest.Digest]string),
+	}
+}
+
+// LoadFromImage reads the manifest list or image index, and additional
+// information about where the various instances that it contains live, from an
+// image record with the specified ID in local storage.
+func LoadFromImage(store storage.Store, image string) (string, List, error) {
+	img, err := store.Image(image)
+	if err != nil {
+		return "", nil, errors.Wrapf(err, "error locating image %q for loading manifest list", image)
+	}
+	manifestBytes, err := store.ImageBigData(img.ID, storage.ImageDigestManifestBigDataNamePrefix)
+	if err != nil {
+		return "", nil, errors.Wrapf(err, "error locating image %q for loading manifest list", image)
+	}
+	manifestList, err := manifests.FromBlob(manifestBytes)
+	if err != nil {
+		return "", nil, err
+	}
+	list := &list{
+		List:      manifestList,
+		instances: make(map[digest.Digest]string),
+	}
+	instancesBytes, err := store.ImageBigData(img.ID, instancesData)
+	if err != nil {
+		return "", nil, errors.Wrapf(err, "error locating image %q for loading instance list", image)
+	}
+	if err := json.Unmarshal(instancesBytes, &list.instances); err != nil {
+		return "", nil, errors.Wrapf(err, "error decoding instance list for image %q", image)
+	}
+	list.instances[""] = img.ID
+	return img.ID, list, err
+}
+
+// SaveToImage saves the manifest list or image index as the manifest of an
+// Image record with the specified names in local storage, generating a random
+// image ID if none is specified. It also stores information about where the
+// images whose manifests are included in the list can be found.
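+//
+// For instance (sketch), passing an empty imageID stores the list under a
+// newly generated image ID:
+//
+//	imageID, err := list.SaveToImage(store, "", []string{"localhost/idx:latest"}, "")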
+func (l *list) SaveToImage(store storage.Store, imageID string, names []string, mimeType string) (string, error) { + manifestBytes, err := l.List.Serialize(mimeType) + if err != nil { + return "", err + } + instancesBytes, err := json.Marshal(&l.instances) + if err != nil { + return "", err + } + img, err := store.CreateImage(imageID, names, "", "", &storage.ImageOptions{}) + if err == nil || errors.Cause(err) == storage.ErrDuplicateID { + created := (err == nil) + if created { + imageID = img.ID + l.instances[""] = img.ID + } + err := store.SetImageBigData(imageID, storage.ImageDigestManifestBigDataNamePrefix, manifestBytes, manifest.Digest) + if err != nil { + if created { + if _, err2 := store.DeleteImage(img.ID, true); err2 != nil { + logrus.Errorf("Deleting image %q after failing to save manifest for it", img.ID) + } + } + return "", errors.Wrapf(err, "saving manifest list to image %q", imageID) + } + err = store.SetImageBigData(imageID, instancesData, instancesBytes, nil) + if err != nil { + if created { + if _, err2 := store.DeleteImage(img.ID, true); err2 != nil { + logrus.Errorf("Deleting image %q after failing to save instance locations for it", img.ID) + } + } + return "", errors.Wrapf(err, "saving instance list to image %q", imageID) + } + return imageID, nil + } + return "", errors.Wrapf(err, "error creating image to hold manifest list") +} + +// Reference returns an image reference for the composite image being built +// in the list, or an error if the list has never been saved to a local image. +func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error) { + if l.instances[""] == "" { + return nil, errors.Wrap(ErrListImageUnknown, "error building reference to list") + } + s, err := is.Transport.ParseStoreReference(store, l.instances[""]) + if err != nil { + return nil, errors.Wrapf(err, "error creating ImageReference from image %q", l.instances[""]) + } + references := make([]types.ImageReference, 0, len(l.instances)) + whichInstances := make([]digest.Digest, 0, len(l.instances)) + switch multiple { + case cp.CopyAllImages, cp.CopySystemImage: + for instance := range l.instances { + if instance != "" { + whichInstances = append(whichInstances, instance) + } + } + case cp.CopySpecificImages: + for instance := range l.instances { + for _, allowed := range instances { + if instance == allowed { + whichInstances = append(whichInstances, instance) + } + } + } + } + for _, instance := range whichInstances { + imageName := l.instances[instance] + ref, err := alltransports.ParseImageName(imageName) + if err != nil { + return nil, errors.Wrapf(err, "error creating ImageReference from image %q", imageName) + } + references = append(references, ref) + } + return supplemented.Reference(s, references, multiple, instances), nil +} + +// Push saves the manifest list and whichever blobs are needed to a destination location. +func (l *list) Push(ctx context.Context, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) { + // Load the system signing policy. + pushPolicy, err := signature.DefaultPolicy(options.SystemContext) + if err != nil { + return nil, "", errors.Wrapf(err, "error obtaining default signature policy") + } + + // Override the settings for local storage to make sure that we can always read the source "image". 
+ pushPolicy.Transports[is.Transport.Name()] = storageAllowedPolicyScopes + + policyContext, err := signature.NewPolicyContext(pushPolicy) + if err != nil { + return nil, "", errors.Wrapf(err, "error creating new signature policy context") + } + defer func() { + if err2 := policyContext.Destroy(); err2 != nil { + logrus.Errorf("Destroying signature policy context: %v", err2) + } + }() + + // If we were given a media type that corresponds to a multiple-images + // type, reset it to a valid corresponding single-image type, since we + // already expect the image library to infer the list type from the + // image type that we're telling it to force. + singleImageManifestType := options.ManifestType + switch singleImageManifestType { + case v1.MediaTypeImageIndex: + singleImageManifestType = v1.MediaTypeImageManifest + case manifest.DockerV2ListMediaType: + singleImageManifestType = manifest.DockerV2Schema2MediaType + } + + // Build a source reference for our list and grab bag full of blobs. + src, err := l.Reference(options.Store, options.ImageListSelection, options.Instances) + if err != nil { + return nil, "", err + } + if options.SourceFilter != nil { + if src, err = options.SourceFilter(src); err != nil { + return nil, "", err + } + } + copyOptions := &cp.Options{ + ImageListSelection: options.ImageListSelection, + Instances: options.Instances, + SourceCtx: options.SystemContext, + DestinationCtx: options.SystemContext, + ReportWriter: options.ReportWriter, + RemoveSignatures: options.RemoveSignatures, + SignBy: options.SignBy, + ForceManifestMIMEType: singleImageManifestType, + } + + // Copy whatever we were asked to copy. + manifestBytes, err := cp.Image(ctx, policyContext, dest, src, copyOptions) + if err != nil { + return nil, "", err + } + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return nil, "", err + } + return nil, manifestDigest, nil +} + +// Add adds information about the specified image to the list, computing the +// image's manifest's digest, retrieving OS and architecture information from +// the image's configuration, and recording the image's reference so that it +// can be found at push-time. Returns the instanceDigest for the image. If +// the reference points to an image list, either all instances are added (if +// "all" is true), or the instance which matches "sys" (if "all" is false) will +// be added. 
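+//
+// For example (sketch; `sys` and `ref` are an assumed system context and a
+// previously parsed image reference):
+//
+//	instanceDigest, err := list.Add(ctx, sys, ref, false) // only the sys-matching instance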
+func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.ImageReference, all bool) (digest.Digest, error) { + src, err := ref.NewImageSource(ctx, sys) + if err != nil { + return "", errors.Wrapf(err, "error setting up to read manifest and configuration from %q", transports.ImageName(ref)) + } + defer src.Close() + + type instanceInfo struct { + instanceDigest *digest.Digest + OS, Architecture, OSVersion, Variant string + Features, OSFeatures, Annotations []string + Size int64 + } + var instanceInfos []instanceInfo + var manifestDigest digest.Digest + + primaryManifestBytes, primaryManifestType, err := src.GetManifest(ctx, nil) + if err != nil { + return "", errors.Wrapf(err, "error reading manifest from %q", transports.ImageName(ref)) + } + + if manifest.MIMETypeIsMultiImage(primaryManifestType) { + lists, err := manifests.FromBlob(primaryManifestBytes) + if err != nil { + return "", errors.Wrapf(err, "error parsing manifest list in %q", transports.ImageName(ref)) + } + if all { + for i, instance := range lists.OCIv1().Manifests { + platform := instance.Platform + if platform == nil { + platform = &v1.Platform{} + } + instanceDigest := instance.Digest + instanceInfo := instanceInfo{ + instanceDigest: &instanceDigest, + OS: platform.OS, + Architecture: platform.Architecture, + OSVersion: platform.OSVersion, + Variant: platform.Variant, + Features: append([]string{}, lists.Docker().Manifests[i].Platform.Features...), + OSFeatures: append([]string{}, platform.OSFeatures...), + Size: instance.Size, + } + instanceInfos = append(instanceInfos, instanceInfo) + } + } else { + list, err := manifest.ListFromBlob(primaryManifestBytes, primaryManifestType) + if err != nil { + return "", errors.Wrapf(err, "error parsing manifest list in %q", transports.ImageName(ref)) + } + instanceDigest, err := list.ChooseInstance(sys) + if err != nil { + return "", errors.Wrapf(err, "error selecting image from manifest list in %q", transports.ImageName(ref)) + } + added := false + for i, instance := range lists.OCIv1().Manifests { + if instance.Digest != instanceDigest { + continue + } + platform := instance.Platform + if platform == nil { + platform = &v1.Platform{} + } + instanceInfo := instanceInfo{ + instanceDigest: &instanceDigest, + OS: platform.OS, + Architecture: platform.Architecture, + OSVersion: platform.OSVersion, + Variant: platform.Variant, + Features: append([]string{}, lists.Docker().Manifests[i].Platform.Features...), + OSFeatures: append([]string{}, platform.OSFeatures...), + Size: instance.Size, + } + instanceInfos = append(instanceInfos, instanceInfo) + added = true + } + if !added { + instanceInfo := instanceInfo{ + instanceDigest: &instanceDigest, + } + instanceInfos = append(instanceInfos, instanceInfo) + } + } + } else { + instanceInfo := instanceInfo{ + instanceDigest: nil, + } + instanceInfos = append(instanceInfos, instanceInfo) + } + + for _, instanceInfo := range instanceInfos { + if instanceInfo.OS == "" || instanceInfo.Architecture == "" { + img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, instanceInfo.instanceDigest)) + if err != nil { + return "", errors.Wrapf(err, "error reading configuration blob from %q", transports.ImageName(ref)) + } + config, err := img.OCIConfig(ctx) + if err != nil { + return "", errors.Wrapf(err, "error reading info about config blob from %q", transports.ImageName(ref)) + } + if instanceInfo.OS == "" { + instanceInfo.OS = config.OS + instanceInfo.OSVersion = config.OSVersion + instanceInfo.OSFeatures = 
config.OSFeatures + } + if instanceInfo.Architecture == "" { + instanceInfo.Architecture = config.Architecture + instanceInfo.Variant = config.Variant + } + } + manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest) + if err != nil { + return "", errors.Wrapf(err, "error reading manifest from %q, instance %q", transports.ImageName(ref), instanceInfo.instanceDigest) + } + if instanceInfo.instanceDigest == nil { + manifestDigest, err = manifest.Digest(manifestBytes) + if err != nil { + return "", errors.Wrapf(err, "error computing digest of manifest from %q", transports.ImageName(ref)) + } + instanceInfo.instanceDigest = &manifestDigest + instanceInfo.Size = int64(len(manifestBytes)) + } else if manifestDigest == "" { + manifestDigest = *instanceInfo.instanceDigest + } + err = l.List.AddInstance(*instanceInfo.instanceDigest, instanceInfo.Size, manifestType, instanceInfo.OS, instanceInfo.Architecture, instanceInfo.OSVersion, instanceInfo.OSFeatures, instanceInfo.Variant, instanceInfo.Features, instanceInfo.Annotations) + if err != nil { + return "", errors.Wrapf(err, "error adding instance with digest %q", *instanceInfo.instanceDigest) + } + if _, ok := l.instances[*instanceInfo.instanceDigest]; !ok { + l.instances[*instanceInfo.instanceDigest] = transports.ImageName(ref) + } + } + + return manifestDigest, nil +} + +// Remove filters out any instances in the list which match the specified digest. +func (l *list) Remove(instanceDigest digest.Digest) error { + err := l.List.Remove(instanceDigest) + if err == nil { + delete(l.instances, instanceDigest) + } + return err +} + +// LockerForImage returns a Locker for a given image record. It's recommended +// that processes which use LoadFromImage() to load a list from an image and +// then use that list's SaveToImage() method to save a modified version of the +// list to that image record use this lock to avoid accidentally wiping out +// changes that another process is also attempting to make. +func LockerForImage(store storage.Store, image string) (lockfile.Locker, error) { + img, err := store.Image(image) + if err != nil { + return nil, errors.Wrapf(err, "locating image %q for locating lock", image) + } + d := digest.NewDigestFromEncoded(digest.Canonical, img.ID) + if err := d.Validate(); err != nil { + return nil, errors.Wrapf(err, "coercing image ID for %q into a digest", image) + } + return store.GetDigestLock(d) +} diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go new file mode 100644 index 00000000000..7ceb6283063 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/normalize.go @@ -0,0 +1,181 @@ +package libimage + +import ( + "runtime" + "strings" + + "github.com/containerd/containerd/platforms" + "github.com/containers/image/v5/docker/reference" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// NormalizePlatform normalizes (according to the OCI spec) the specified os, +// arch and variant. If left empty, the individual item will not be normalized. 
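+//
+// For example (sketch; the aarch64 alias is resolved by the containerd
+// platform normalization used below):
+//
+//	os, arch, variant := NormalizePlatform("linux", "aarch64", "")
+//	// os == "linux", arch == "arm64", variant == ""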
+func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant string) {
+	os, arch, variant = rawOS, rawArch, rawVariant
+	if os == "" {
+		os = runtime.GOOS
+	}
+	if arch == "" {
+		arch = runtime.GOARCH
+	}
+	rawPlatform := os + "/" + arch
+	if variant != "" {
+		rawPlatform += "/" + variant
+	}
+
+	normalizedPlatform, err := platforms.Parse(rawPlatform)
+	if err != nil {
+		logrus.Debugf("Error normalizing platform: %v", err)
+		return rawOS, rawArch, rawVariant
+	}
+	logrus.Debugf("Normalized platform %s to %s", rawPlatform, normalizedPlatform)
+	os = rawOS
+	if rawOS != "" {
+		os = normalizedPlatform.OS
+	}
+	arch = rawArch
+	if rawArch != "" {
+		arch = normalizedPlatform.Architecture
+	}
+	variant = rawVariant
+	if rawVariant != "" {
+		variant = normalizedPlatform.Variant
+	}
+	return os, arch, variant
+}
+
+// NormalizeName normalizes the provided name according to the conventions by
+// Podman and Buildah. If tag and digest are missing, the "latest" tag will be
+// used. If it's a short name, it will be prefixed with "localhost/".
+//
+// References to docker.io are normalized according to the Docker conventions.
+// For instance, "docker.io/foo" turns into "docker.io/library/foo".
+func NormalizeName(name string) (reference.Named, error) {
+	// NOTE: this code is in symmetry with containers/image/pkg/shortnames.
+	ref, err := reference.Parse(name)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error normalizing name %q", name)
+	}
+
+	named, ok := ref.(reference.Named)
+	if !ok {
+		return nil, errors.Errorf("%q is not a named reference", name)
+	}
+
+	// Enforce "localhost" if needed.
+	registry := reference.Domain(named)
+	if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
+		name = toLocalImageName(ref.String())
+	}
+
+	// Another parse which also makes sure that docker.io references are
+	// correctly normalized (e.g., docker.io/alpine to
+	// docker.io/library/alpine).
+	named, err = reference.ParseNormalizedNamed(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, hasTag := named.(reference.NamedTagged); hasTag {
+		// Strip off the tag of a tagged and digested reference.
+		named, err = normalizeTaggedDigestedNamed(named)
+		if err != nil {
+			return nil, err
+		}
+		return named, nil
+	}
+	if _, hasDigest := named.(reference.Digested); hasDigest {
+		return named, nil
+	}
+
+	// Make sure to tag "latest".
+	return reference.TagNameOnly(named), nil
+}
+
+// prefix the specified name with "localhost/".
+func toLocalImageName(name string) string {
+	return "localhost/" + strings.TrimLeft(name, "/")
+}
+
+// NameTagPair represents a RepoTag of an image.
+type NameTagPair struct {
+	// Name of the RepoTag. May be "".
+	Name string
+	// Tag of the RepoTag. May be "".
+	Tag string
+
+	// for internal use
+	named reference.Named
+}
+
+// ToNameTagPairs splits repoTags into name and tag pairs.
+// Guaranteed to return at least one pair.
+func ToNameTagPairs(repoTags []reference.Named) ([]NameTagPair, error) {
+	none := ""
+
+	var pairs []NameTagPair
+	for i, named := range repoTags {
+		pair := NameTagPair{
+			Name:  named.Name(),
+			Tag:   none,
+			named: repoTags[i],
+		}
+
+		if tagged, isTagged := named.(reference.NamedTagged); isTagged {
+			pair.Tag = tagged.Tag()
+		}
+		pairs = append(pairs, pair)
+	}
+
+	if len(pairs) == 0 {
+		pairs = append(pairs, NameTagPair{Name: none, Tag: none})
+	}
+	return pairs, nil
+}
+
+// normalizeTaggedDigestedString strips the tag off the specified string iff it
+// is tagged and digested.
Note that the tag is entirely ignored to match
+// Docker behavior.
+func normalizeTaggedDigestedString(s string) (string, error) {
+	// Note that the input string is not expected to be parseable, so we
+	// return it verbatim in error cases.
+	ref, err := reference.Parse(s)
+	if err != nil {
+		return "", err
+	}
+	named, ok := ref.(reference.Named)
+	if !ok {
+		return s, nil
+	}
+	named, err = normalizeTaggedDigestedNamed(named)
+	if err != nil {
+		return "", err
+	}
+	return named.String(), nil
+}
+
+// normalizeTaggedDigestedNamed strips the tag off the specified named
+// reference iff it is tagged and digested. Note that the tag is entirely
+// ignored to match Docker behavior.
+func normalizeTaggedDigestedNamed(named reference.Named) (reference.Named, error) {
+	_, isTagged := named.(reference.NamedTagged)
+	if !isTagged {
+		return named, nil
+	}
+	digested, isDigested := named.(reference.Digested)
+	if !isDigested {
+		return named, nil
+	}
+
+	// Now strip off the tag.
+	newNamed := reference.TrimNamed(named)
+	// And re-add the digest.
+	newNamed, err := reference.WithDigest(newNamed, digested.Digest())
+	if err != nil {
+		return named, err
+	}
+	logrus.Debugf("Stripped off tag from tagged and digested reference %q", named.String())
+	return newNamed, nil
+}
diff --git a/vendor/github.com/containers/common/libimage/oci.go b/vendor/github.com/containers/common/libimage/oci.go
new file mode 100644
index 00000000000..b88d6613d74
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/oci.go
@@ -0,0 +1,97 @@
+package libimage
+
+import (
+	"context"
+
+	ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// toOCI returns the image as OCI v1 image.
+func (i *Image) toOCI(ctx context.Context) (*ociv1.Image, error) {
+	if i.cached.ociv1Image != nil {
+		return i.cached.ociv1Image, nil
+	}
+	ref, err := i.StorageReference()
+	if err != nil {
+		return nil, err
+	}
+
+	img, err := ref.NewImage(ctx, i.runtime.systemContextCopy())
+	if err != nil {
+		return nil, err
+	}
+	defer img.Close()
+
+	return img.OCIConfig(ctx)
+}
+
+// historiesMatch returns the number of entries in the histories which have the
+// same contents.
+func historiesMatch(a, b []ociv1.History) int {
+	i := 0
+	for i < len(a) && i < len(b) {
+		if a[i].Created != nil && b[i].Created == nil {
+			return i
+		}
+		if a[i].Created == nil && b[i].Created != nil {
+			return i
+		}
+		if a[i].Created != nil && b[i].Created != nil {
+			if !a[i].Created.Equal(*(b[i].Created)) {
+				return i
+			}
+		}
+		if a[i].CreatedBy != b[i].CreatedBy {
+			return i
+		}
+		if a[i].Author != b[i].Author {
+			return i
+		}
+		if a[i].Comment != b[i].Comment {
+			return i
+		}
+		if a[i].EmptyLayer != b[i].EmptyLayer {
+			return i
+		}
+		i++
+	}
+	return i
+}
+
+// areParentAndChild checks diff ID and history in the two images and returns
+// true if the second should be considered to be directly based on the first.
+func areParentAndChild(parent, child *ociv1.Image) bool {
+	// the child and candidate parent should share all of the
+	// candidate parent's diff IDs, which together would have
+	// controlled which layers were used
+
+	// Both child and parent may be nil when the storage is left in an
+	// incoherent state. Issue #7444 describes such a case when a build
+	// has been killed.
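+	//
+	// For illustration: a parent with diff IDs [a, b] and three history
+	// entries is a direct base of a child with diff IDs [a, b, c] and
+	// exactly four history entries whose first three match the parent's.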
+	if child == nil || parent == nil {
+		return false
+	}
+
+	if len(parent.RootFS.DiffIDs) > len(child.RootFS.DiffIDs) {
+		return false
+	}
+	childUsesCandidateDiffs := true
+	for i := range parent.RootFS.DiffIDs {
+		if child.RootFS.DiffIDs[i] != parent.RootFS.DiffIDs[i] {
+			childUsesCandidateDiffs = false
+			break
+		}
+	}
+	if !childUsesCandidateDiffs {
+		return false
+	}
+	// the child should have the same history as the parent, plus
+	// one more entry
+	if len(parent.History)+1 != len(child.History) {
+		return false
+	}
+	if historiesMatch(parent.History, child.History) != len(parent.History) {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go
new file mode 100644
index 00000000000..ff93b6ed888
--- /dev/null
+++ b/vendor/github.com/containers/common/libimage/pull.go
@@ -0,0 +1,634 @@
+package libimage
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/containers/common/pkg/config"
+	registryTransport "github.com/containers/image/v5/docker"
+	dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
+	dockerDaemonTransport "github.com/containers/image/v5/docker/daemon"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	ociArchiveTransport "github.com/containers/image/v5/oci/archive"
+	ociTransport "github.com/containers/image/v5/oci/layout"
+	"github.com/containers/image/v5/pkg/shortnames"
+	storageTransport "github.com/containers/image/v5/storage"
+	"github.com/containers/image/v5/transports/alltransports"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	digest "github.com/opencontainers/go-digest"
+	ociSpec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// PullOptions allows for customizing image pulls.
+type PullOptions struct {
+	CopyOptions
+
+	// If true, all tags of the image will be pulled from the container
+	// registry. Only supported for the docker transport.
+	AllTags bool
+}
+
+// Pull pulls the specified name. Name may refer to any of the supported
+// transports from github.com/containers/image. If no transport is encoded,
+// name will be treated as a reference to a registry (i.e., docker transport).
+//
+// Note that pullPolicy is only used when pulling from a container registry but
+// it *must* be different than the default value `config.PullPolicyUnsupported`. This
+// way, callers are forced to decide on the pull behaviour. The reasoning
+// behind this is that some (commands of some) tools have different default pull
+// policies (e.g., buildah-bud versus podman-build). Making the pull-policy
+// choice explicit is an attempt to prevent silent regressions.
+//
+// The error is storage.ErrImageUnknown iff the pull policy is set to "never"
+// and no local image has been found. This allows for an easier integration
+// into some users of this package (e.g., Buildah).
+func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullPolicy, options *PullOptions) ([]*Image, error) {
+	logrus.Debugf("Pulling image %s (policy: %s)", name, pullPolicy)
+
+	if options == nil {
+		options = &PullOptions{}
+	}
+
+	var possiblyUnqualifiedName string // used for short-name resolution
+	ref, err := alltransports.ParseImageName(name)
+	if err != nil {
+		// Check whether `name` points to a transport. If so, we
+		// return the error.
Otherwise we assume that `name` refers to + // an image on a registry (e.g., "fedora"). + // + // NOTE: the `docker` transport is an exception to support a + // `pull docker:latest` which would otherwise return an error. + if t := alltransports.TransportFromImageName(name); t != nil && t.Name() != registryTransport.Transport.Name() { + return nil, err + } + + // If the image clearly refers to a local one, we can look it up directly. + // In fact, we need to since they are not parseable. + if strings.HasPrefix(name, "sha256:") || (len(name) == 64 && !strings.ContainsAny(name, "/.:@")) { + if pullPolicy == config.PullPolicyAlways { + return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", name) + } + local, _, err := r.LookupImage(name, nil) + if err != nil { + return nil, err + } + return []*Image{local}, err + } + + // Docker compat: strip off the tag iff name is tagged and digested + // (e.g., fedora:latest@sha256...). In that case, the tag is stripped + // off and entirely ignored. The digest is the sole source of truth. + normalizedName, normalizeError := normalizeTaggedDigestedString(name) + if normalizeError != nil { + return nil, normalizeError + } + name = normalizedName + + // If the input does not include a transport assume it refers + // to a registry. + dockerRef, dockerErr := alltransports.ParseImageName("docker://" + name) + if dockerErr != nil { + return nil, err + } + ref = dockerRef + possiblyUnqualifiedName = name + } else if ref.Transport().Name() == registryTransport.Transport.Name() { + // Normalize the input if we're referring to the docker + // transport directly. That makes sure that a `docker://fedora` + // will resolve directly to `docker.io/library/fedora:latest` + // and not be subject to short-name resolution. + named := ref.DockerReference() + if named == nil { + return nil, errors.New("internal error: unexpected nil reference") + } + possiblyUnqualifiedName = named.String() + } + + if options.AllTags && ref.Transport().Name() != registryTransport.Transport.Name() { + return nil, errors.Errorf("pulling all tags is not supported for %s transport", ref.Transport().Name()) + } + + if r.eventChannel != nil { + defer r.writeEvent(&Event{ID: "", Name: name, Time: time.Now(), Type: EventTypeImagePull}) + } + + // Some callers may set the platform via the system context at creation + // time of the runtime. We need this information to decide whether we + // need to enforce pulling from a registry (see + // containers/podman/issues/10682). + if options.Architecture == "" { + options.Architecture = r.systemContext.ArchitectureChoice + } + if options.OS == "" { + options.OS = r.systemContext.OSChoice + } + if options.Variant == "" { + options.Variant = r.systemContext.VariantChoice + } + + var ( + pulledImages []string + pullError error + ) + + // Dispatch the copy operation. 
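+	// For example (illustrative): "docker://fedora" dispatches to
+	// copyFromRegistry, "docker-archive:/tmp/img.tar" to
+	// copyFromDockerArchive, and all remaining transports (oci:, dir:,
+	// docker-daemon:, ...) to copyFromDefault.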
+ switch ref.Transport().Name() { + + // DOCKER REGISTRY + case registryTransport.Transport.Name(): + pulledImages, pullError = r.copyFromRegistry(ctx, ref, possiblyUnqualifiedName, pullPolicy, options) + + // DOCKER ARCHIVE + case dockerArchiveTransport.Transport.Name(): + pulledImages, pullError = r.copyFromDockerArchive(ctx, ref, &options.CopyOptions) + + // ALL OTHER TRANSPORTS + default: + pulledImages, pullError = r.copyFromDefault(ctx, ref, &options.CopyOptions) + } + + if pullError != nil { + return nil, pullError + } + + localImages := []*Image{} + for _, name := range pulledImages { + local, _, err := r.LookupImage(name, nil) + if err != nil { + return nil, errors.Wrapf(err, "error locating pulled image %q name in containers storage", name) + } + localImages = append(localImages, local) + } + + return localImages, pullError +} + +// nameFromAnnotations returns a reference string to be used as an image name, +// or an empty string. The annotations map may be nil. +func nameFromAnnotations(annotations map[string]string) string { + if annotations == nil { + return "" + } + // buildkit/containerd are using a custom annotation see + // containers/podman/issues/12560. + if annotations["io.containerd.image.name"] != "" { + return annotations["io.containerd.image.name"] + } + return annotations[ociSpec.AnnotationRefName] +} + +// copyFromDefault is the default copier for a number of transports. Other +// transports require some specific dancing, sometimes Yoga. +func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) { + c, err := r.newCopier(options) + if err != nil { + return nil, err + } + defer c.close() + + // Figure out a name for the storage destination. + var storageName, imageName string + switch ref.Transport().Name() { + + case dockerDaemonTransport.Transport.Name(): + // Normalize to docker.io if needed (see containers/podman/issues/10998). + named, err := reference.ParseNormalizedNamed(ref.StringWithinTransport()) + if err != nil { + return nil, err + } + imageName = named.String() + storageName = imageName + + case ociTransport.Transport.Name(): + split := strings.SplitN(ref.StringWithinTransport(), ":", 2) + storageName = toLocalImageName(split[0]) + imageName = storageName + + case ociArchiveTransport.Transport.Name(): + manifestDescriptor, err := ociArchiveTransport.LoadManifestDescriptor(ref) + if err != nil { + return nil, err + } + storageName = nameFromAnnotations(manifestDescriptor.Annotations) + switch len(storageName) { + case 0: + // If there's no reference name in the annotations, compute an ID. + storageName, err = getImageID(ctx, ref, nil) + if err != nil { + return nil, err + } + imageName = "sha256:" + storageName[1:] + default: + named, err := NormalizeName(storageName) + if err != nil { + return nil, err + } + imageName = named.String() + storageName = imageName + } + + case storageTransport.Transport.Name(): + storageName = ref.StringWithinTransport() + named := ref.DockerReference() + if named == nil { + return nil, errors.Errorf("could not get an image name for storage reference %q", ref) + } + imageName = named.String() + + default: + // Path-based transports (e.g., dir) may include invalid + // characters, so we should pessimistically generate an ID + // instead of looking at the StringWithinTransport(). + storageName, err = getImageID(ctx, ref, nil) + if err != nil { + return nil, err + } + imageName = "sha256:" + storageName[1:] + } + + // Create a storage reference. 
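+	// For example (illustrative): for "oci:/tmp/layout" both storageName
+	// and imageName end up as "localhost/tmp/layout", while path-based
+	// transports such as dir: fall back to an ID-derived "sha256:..."
+	// name.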
+ destRef, err := storageTransport.Transport.ParseStoreReference(r.store, storageName) + if err != nil { + return nil, errors.Wrapf(err, "parsing %q", storageName) + } + + _, err = c.copy(ctx, ref, destRef) + return []string{imageName}, err +} + +// storageReferencesFromArchiveReader returns a slice of image references inside the +// archive reader. A docker archive may include more than one image and this +// method allows for extracting them into containers storage references which +// can later be used from copying. +func (r *Runtime) storageReferencesReferencesFromArchiveReader(ctx context.Context, readerRef types.ImageReference, reader *dockerArchiveTransport.Reader) ([]types.ImageReference, []string, error) { + destNames, err := reader.ManifestTagsForReference(readerRef) + if err != nil { + return nil, nil, err + } + + var imageNames []string + if len(destNames) == 0 { + destName, err := getImageID(ctx, readerRef, &r.systemContext) + if err != nil { + return nil, nil, err + } + destNames = append(destNames, destName) + // Make sure the image can be loaded after the pull by + // replacing the @ with sha256:. + imageNames = append(imageNames, "sha256:"+destName[1:]) + } else { + for i := range destNames { + ref, err := NormalizeName(destNames[i]) + if err != nil { + return nil, nil, err + } + destNames[i] = ref.String() + } + imageNames = destNames + } + + references := []types.ImageReference{} + for _, destName := range destNames { + destRef, err := storageTransport.Transport.ParseStoreReference(r.store, destName) + if err != nil { + return nil, nil, errors.Wrapf(err, "error parsing dest reference name %#v", destName) + } + references = append(references, destRef) + } + + return references, imageNames, nil +} + +// copyFromDockerArchive copies one image from the specified reference. +func (r *Runtime) copyFromDockerArchive(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) { + // There may be more than one image inside the docker archive, so we + // need a quick glimpse inside. + reader, readerRef, err := dockerArchiveTransport.NewReaderForReference(&r.systemContext, ref) + if err != nil { + return nil, err + } + + return r.copyFromDockerArchiveReaderReference(ctx, reader, readerRef, options) +} + +// copyFromDockerArchiveReaderReference copies the specified readerRef from reader. +func (r *Runtime) copyFromDockerArchiveReaderReference(ctx context.Context, reader *dockerArchiveTransport.Reader, readerRef types.ImageReference, options *CopyOptions) ([]string, error) { + c, err := r.newCopier(options) + if err != nil { + return nil, err + } + defer c.close() + + // Get a slice of storage references we can copy. + references, destNames, err := r.storageReferencesReferencesFromArchiveReader(ctx, readerRef, reader) + if err != nil { + return nil, err + } + + // Now copy all of the images. Use readerRef for performance. + for _, destRef := range references { + if _, err := c.copy(ctx, readerRef, destRef); err != nil { + return nil, err + } + } + + return destNames, nil +} + +// copyFromRegistry pulls the specified, possibly unqualified, name from a +// registry. On successful pull it returns the ID of the image in local +// storage. +// +// If options.All is set, all tags from the specified registry will be pulled. +func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference, inputName string, pullPolicy config.PullPolicy, options *PullOptions) ([]string, error) { + // Sanity check. 
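+	// A zero-value policy (config.PullPolicyUnsupported) fails this
+	// validation, so callers must choose a pull behaviour explicitly.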
+ if err := pullPolicy.Validate(); err != nil { + return nil, err + } + + if !options.AllTags { + return r.copySingleImageFromRegistry(ctx, inputName, pullPolicy, options) + } + + // Copy all tags + named := reference.TrimNamed(ref.DockerReference()) + tags, err := registryTransport.GetRepositoryTags(ctx, &r.systemContext, ref) + if err != nil { + return nil, err + } + + pulledIDs := []string{} + for _, tag := range tags { + select { // Let's be gentle with Podman remote. + case <-ctx.Done(): + return nil, errors.Errorf("pulling cancelled") + default: + // We can continue. + } + tagged, err := reference.WithTag(named, tag) + if err != nil { + return nil, errors.Wrapf(err, "error creating tagged reference (name %s, tag %s)", named.String(), tag) + } + pulled, err := r.copySingleImageFromRegistry(ctx, tagged.String(), pullPolicy, options) + if err != nil { + return nil, err + } + pulledIDs = append(pulledIDs, pulled...) + } + + return pulledIDs, nil +} + +// imageIDsForManifest() parses the manifest of the copied image and then looks +// up the IDs of the matching image. There's a small slice of time, between +// when we copy the image into local storage and when we go to look for it +// using the name that we gave it when we copied it, when the name we wanted to +// assign to the image could have been moved, but the image's ID will remain +// the same until it is deleted. +func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemContext) ([]string, error) { + var imageDigest digest.Digest + manifestType := manifest.GuessMIMEType(manifestBytes) + if manifest.MIMETypeIsMultiImage(manifestType) { + list, err := manifest.ListFromBlob(manifestBytes, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "parsing manifest list") + } + d, err := list.ChooseInstance(sys) + if err != nil { + return nil, errors.Wrapf(err, "choosing instance from manifest list") + } + imageDigest = d + } else { + d, err := manifest.Digest(manifestBytes) + if err != nil { + return nil, errors.Wrapf(err, "digesting manifest") + } + imageDigest = d + } + var results []string + images, err := r.store.ImagesByDigest(imageDigest) + if err != nil { + return nil, errors.Wrapf(err, "listing images by manifest digest") + } + for _, image := range images { + results = append(results, image.ID) + } + if len(results) == 0 { + return nil, errors.Wrapf(storage.ErrImageUnknown, "identifying new image by manifest digest") + } + return results, nil +} + +// copySingleImageFromRegistry pulls the specified, possibly unqualified, name +// from a registry. On successful pull it returns the ID of the image in local +// storage. +func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName string, pullPolicy config.PullPolicy, options *PullOptions) ([]string, error) { //nolint:gocyclo + // Sanity check. + if err := pullPolicy.Validate(); err != nil { + return nil, err + } + + var ( + localImage *Image + resolvedImageName string + err error + ) + + // Always check if there's a local image. If so, we should use its + // resolved name for pulling. Assume we're doing a `pull foo`. + // If there's already a local image "localhost/foo", then we should + // attempt pulling that instead of doing the full short-name dance. + // + // NOTE that we only do platform checks if the specified values differ + // from the local platform. Unfortunately, there are many images used + // in the wild which don't set the correct value(s) in the config + // causing various issues such as containers/podman/issues/10682. 
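+	//
+	// For example (illustrative): on an amd64 host, a requested "arm64"
+	// architecture is recorded in the lookup options below, whereas a
+	// requested "amd64" is treated as if no architecture was requested.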
+ lookupImageOptions := &LookupImageOptions{Variant: options.Variant} + if options.Architecture != runtime.GOARCH { + lookupImageOptions.Architecture = options.Architecture + } + if options.OS != runtime.GOOS { + lookupImageOptions.OS = options.OS + } + localImage, resolvedImageName, err = r.LookupImage(imageName, lookupImageOptions) + if err != nil && errors.Cause(err) != storage.ErrImageUnknown { + logrus.Errorf("Looking up %s in local storage: %v", imageName, err) + } + + // If the local image is corrupted, we need to repull it. + if localImage != nil { + if err := localImage.isCorrupted(imageName); err != nil { + logrus.Error(err) + localImage = nil + } + } + + customPlatform := len(options.Architecture)+len(options.OS)+len(options.Variant) > 0 + if customPlatform && pullPolicy != config.PullPolicyAlways && pullPolicy != config.PullPolicyNever { + // Unless the pull policy is always/never, we must + // pessimistically assume that the local image has an invalid + // architecture (see containers/podman/issues/10682). Hence, + // whenever the user requests a custom platform, set the pull + // policy to "newer" to make sure we're pulling down the + // correct image. + // + // NOTE that this is will even override --pull={false,never}. + pullPolicy = config.PullPolicyNewer + logrus.Debugf("Enforcing pull policy to %q to pull custom platform (arch: %q, os: %q, variant: %q) - local image may mistakenly specify wrong platform", pullPolicy, options.Architecture, options.OS, options.Variant) + } + + if pullPolicy == config.PullPolicyNever { + if localImage != nil { + logrus.Debugf("Pull policy %q and %s resolved to local image %s", pullPolicy, imageName, resolvedImageName) + return []string{resolvedImageName}, nil + } + logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName) + return nil, errors.Wrap(storage.ErrImageUnknown, imageName) + } + + if pullPolicy == config.PullPolicyMissing && localImage != nil { + return []string{resolvedImageName}, nil + } + + // If we looked up the image by ID, we cannot really pull from anywhere. + if localImage != nil && strings.HasPrefix(localImage.ID(), imageName) { + switch pullPolicy { + case config.PullPolicyAlways: + return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", imageName) + default: + return []string{resolvedImageName}, nil + } + } + + // If we found a local image, we should use its locally resolved name + // (see containers/buildah/issues/2904). An exception is if a custom + // platform is specified (e.g., `--arch=arm64`). In that case, we need + // to pessimistically pull the image since some images declare wrong + // platforms making platform checks absolutely unreliable (see + // containers/podman/issues/10682). + // + // In other words: multi-arch support can only be as good as the images + // in the wild, so we shouldn't break things for our users by trying to + // insist that they make sense. + if localImage != nil && !customPlatform { + if imageName != resolvedImageName { + logrus.Debugf("Image %s resolved to local image %s which will be used for pulling", imageName, resolvedImageName) + } + imageName = resolvedImageName + } + + sys := r.systemContextCopy() + resolved, err := shortnames.Resolve(sys, imageName) + if err != nil { + // TODO: that is a too big of a hammer since we should only + // ignore errors that indicate that there's no alias and no + // USRs. Must be addressed in c/image first. 
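+		// (For context: shortnames.Resolve expands an unqualified
+		// name such as "fedora" into qualified pull candidates, for
+		// example "registry.fedoraproject.org/fedora:latest", based on
+		// aliases and unqualified-search registries in
+		// registries.conf.)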
+ if localImage != nil && pullPolicy == config.PullPolicyNewer { + return []string{resolvedImageName}, nil + } + return nil, err + } + + // NOTE: Below we print the description from the short-name resolution. + // In theory we could print it here. In practice, however, this is + // causing a hard time for Buildah uses who are doing a `buildah from + // image` and expect just the container name to be printed if the image + // is present locally. + // The pragmatic solution is to only print the description when we found + // a _newer_ image that we're about to pull. + wroteDesc := false + writeDesc := func() error { + if wroteDesc { + return nil + } + wroteDesc = true + if desc := resolved.Description(); len(desc) > 0 { + logrus.Debug(desc) + if options.Writer != nil { + if _, err := options.Writer.Write([]byte(desc + "\n")); err != nil { + return err + } + } + } + return nil + } + + c, err := r.newCopier(&options.CopyOptions) + if err != nil { + return nil, err + } + defer c.close() + + var pullErrors []error + for _, candidate := range resolved.PullCandidates { + candidateString := candidate.Value.String() + logrus.Debugf("Attempting to pull candidate %s for %s", candidateString, imageName) + srcRef, err := registryTransport.NewReference(candidate.Value) + if err != nil { + return nil, err + } + + if pullPolicy == config.PullPolicyNewer && localImage != nil { + isNewer, err := localImage.hasDifferentDigestWithSystemContext(ctx, srcRef, c.systemContext) + if err != nil { + pullErrors = append(pullErrors, err) + continue + } + + if !isNewer { + logrus.Debugf("Skipping pull candidate %s as the image is not newer (pull policy %s)", candidateString, pullPolicy) + continue + } + } + + destRef, err := storageTransport.Transport.ParseStoreReference(r.store, candidate.Value.String()) + if err != nil { + return nil, err + } + + if err := writeDesc(); err != nil { + return nil, err + } + if options.Writer != nil { + if _, err := io.WriteString(options.Writer, fmt.Sprintf("Trying to pull %s...\n", candidateString)); err != nil { + return nil, err + } + } + var manifestBytes []byte + if manifestBytes, err = c.copy(ctx, srcRef, destRef); err != nil { + logrus.Debugf("Error pulling candidate %s: %v", candidateString, err) + pullErrors = append(pullErrors, err) + continue + } + if err := candidate.Record(); err != nil { + // Only log the recording errors. Podman has seen + // reports where users set most of the system to + // read-only which can cause issues. 
+ logrus.Errorf("Error recording short-name alias %q: %v", candidateString, err) + } + + logrus.Debugf("Pulled candidate %s successfully", candidateString) + if ids, err := r.imagesIDsForManifest(manifestBytes, sys); err == nil { + return ids, nil + } + return []string{candidate.Value.String()}, nil + } + + if localImage != nil && pullPolicy == config.PullPolicyNewer { + return []string{resolvedImageName}, nil + } + + if len(pullErrors) == 0 { + return nil, errors.Errorf("internal error: no image pulled (pull policy %s)", pullPolicy) + } + + return nil, resolved.FormatPullErrors(pullErrors) +} diff --git a/vendor/github.com/containers/common/libimage/push.go b/vendor/github.com/containers/common/libimage/push.go new file mode 100644 index 00000000000..7203838aa61 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/push.go @@ -0,0 +1,89 @@ +package libimage + +import ( + "context" + "time" + + dockerArchiveTransport "github.com/containers/image/v5/docker/archive" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/transports/alltransports" + "github.com/sirupsen/logrus" +) + +// PushOptions allows for custommizing image pushes. +type PushOptions struct { + CopyOptions +} + +// Push pushes the specified source which must refer to an image in the local +// containers storage. It may or may not have the `containers-storage:` +// prefix. Use destination to push to a custom destination. The destination +// can refer to any supported transport. If not transport is specified, the +// docker transport (i.e., a registry) is implied. If destination is left +// empty, the docker destination will be extrapolated from the source. +// +// Return storage.ErrImageUnknown if source could not be found in the local +// containers storage. +func (r *Runtime) Push(ctx context.Context, source, destination string, options *PushOptions) ([]byte, error) { + if options == nil { + options = &PushOptions{} + } + + // Look up the local image. Note that we need to ignore the platform + // and push what the user specified (containers/podman/issues/10344). + image, resolvedSource, err := r.LookupImage(source, nil) + if err != nil { + return nil, err + } + + srcRef, err := image.StorageReference() + if err != nil { + return nil, err + } + + // Make sure we have a proper destination, and parse it into an image + // reference for copying. + if destination == "" { + // Doing an ID check here is tempting but false positives (due + // to a short partial IDs) are more painful than false + // negatives. + destination = resolvedSource + } + + logrus.Debugf("Pushing image %s to %s", source, destination) + + destRef, err := alltransports.ParseImageName(destination) + if err != nil { + // If the input does not include a transport assume it refers + // to a registry. + dockerRef, dockerErr := alltransports.ParseImageName("docker://" + destination) + if dockerErr != nil { + return nil, err + } + destRef = dockerRef + } + + if r.eventChannel != nil { + defer r.writeEvent(&Event{ID: image.ID(), Name: destination, Time: time.Now(), Type: EventTypeImagePush}) + } + + // Buildah compat: Make sure to tag the destination image if it's a + // Docker archive. This way, we preserve the image name. 
+ if destRef.Transport().Name() == dockerArchiveTransport.Transport.Name() { + if named, err := reference.ParseNamed(resolvedSource); err == nil { + tagged, isTagged := named.(reference.NamedTagged) + if isTagged { + options.dockerArchiveAdditionalTags = []reference.NamedTagged{tagged} + } + } + } + + c, err := r.newCopier(&options.CopyOptions) + if err != nil { + return nil, err + } + + defer c.close() + + return c.copy(ctx, srcRef, destRef) +} diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go new file mode 100644 index 00000000000..974b50b50bb --- /dev/null +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -0,0 +1,756 @@ +package libimage + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/shortnames" + storageTransport "github.com/containers/image/v5/storage" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + deepcopy "github.com/jinzhu/copier" + jsoniter "github.com/json-iterator/go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Faster than the standard library, see https://github.com/json-iterator/go. +var json = jsoniter.ConfigCompatibleWithStandardLibrary + +// tmpdir returns a path to a temporary directory. +func tmpdir() string { + tmpdir := os.Getenv("TMPDIR") + if tmpdir == "" { + tmpdir = "/var/tmp" + } + + return tmpdir +} + +// RuntimeOptions allow for creating a customized Runtime. +type RuntimeOptions struct { + // The base system context of the runtime which will be used throughout + // the entire lifespan of the Runtime. Certain options in some + // functions may override specific fields. + SystemContext *types.SystemContext +} + +// setRegistriesConfPath sets the registries.conf path for the specified context. +func setRegistriesConfPath(systemContext *types.SystemContext) { + if systemContext.SystemRegistriesConfPath != "" { + return + } + if envOverride, ok := os.LookupEnv("CONTAINERS_REGISTRIES_CONF"); ok { + systemContext.SystemRegistriesConfPath = envOverride + return + } + if envOverride, ok := os.LookupEnv("REGISTRIES_CONFIG_PATH"); ok { + systemContext.SystemRegistriesConfPath = envOverride + return + } +} + +// Runtime is responsible for image management and storing them in a containers +// storage. +type Runtime struct { + // Use to send events out to users. + eventChannel chan *Event + // Underlying storage store. + store storage.Store + // Global system context. No pointer to simplify copying and modifying + // it. + systemContext types.SystemContext +} + +// Returns a copy of the runtime's system context. +func (r *Runtime) SystemContext() *types.SystemContext { + return r.systemContextCopy() +} + +// Returns a copy of the runtime's system context. +func (r *Runtime) systemContextCopy() *types.SystemContext { + var sys types.SystemContext + _ = deepcopy.Copy(&sys, &r.systemContext) + return &sys +} + +// EventChannel creates a buffered channel for events that the Runtime will use +// to write events to. Callers are expected to read from the channel in a +// timely manner. +// Can be called once for a given Runtime. +func (r *Runtime) EventChannel() chan *Event { + if r.eventChannel != nil { + return r.eventChannel + } + r.eventChannel = make(chan *Event, 100) + return r.eventChannel +} + +// RuntimeFromStore returns a Runtime for the specified store. 
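+//
+// Typical usage (illustrative):
+//
+//	store, err := storage.GetStore(storage.StoreOptions{})
+//	// ... handle err ...
+//	rt, err := RuntimeFromStore(store, nil)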
+func RuntimeFromStore(store storage.Store, options *RuntimeOptions) (*Runtime, error) {
+	if options == nil {
+		options = &RuntimeOptions{}
+	}
+
+	var systemContext types.SystemContext
+	if options.SystemContext != nil {
+		systemContext = *options.SystemContext
+	} else {
+		systemContext = types.SystemContext{}
+	}
+	if systemContext.BigFilesTemporaryDir == "" {
+		systemContext.BigFilesTemporaryDir = tmpdir()
+	}
+
+	setRegistriesConfPath(&systemContext)
+
+	return &Runtime{
+		store:         store,
+		systemContext: systemContext,
+	}, nil
+}
+
+// RuntimeFromStoreOptions returns a Runtime for the specified store options.
+func RuntimeFromStoreOptions(runtimeOptions *RuntimeOptions, storeOptions *storage.StoreOptions) (*Runtime, error) {
+	if storeOptions == nil {
+		storeOptions = &storage.StoreOptions{}
+	}
+	store, err := storage.GetStore(*storeOptions)
+	if err != nil {
+		return nil, err
+	}
+	storageTransport.Transport.SetStore(store)
+	return RuntimeFromStore(store, runtimeOptions)
+}
+
+// Shutdown attempts to free any kernel resources which are being used by the
+// underlying driver. If "force" is true, any mounted (i.e., in use) layers
+// are unmounted beforehand. If "force" is not true, then layers being in use
+// is considered to be an error condition.
+func (r *Runtime) Shutdown(force bool) error {
+	_, err := r.store.Shutdown(force)
+	if r.eventChannel != nil {
+		close(r.eventChannel)
+	}
+	return err
+}
+
+// storageToImage transforms a storage.Image to an Image.
+func (r *Runtime) storageToImage(storageImage *storage.Image, ref types.ImageReference) *Image {
+	return &Image{
+		runtime:          r,
+		storageImage:     storageImage,
+		storageReference: ref,
+	}
+}
+
+// Exists returns true if the specified image exists in the local containers
+// storage. Note that it may return false if an image is corrupted.
+func (r *Runtime) Exists(name string) (bool, error) {
+	image, _, err := r.LookupImage(name, nil)
+	if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
+		return false, err
+	}
+	if image == nil {
+		return false, nil
+	}
+	if err := image.isCorrupted(name); err != nil {
+		logrus.Error(err)
+		return false, nil
+	}
+	return true, nil
+}
+
+// LookupImageOptions allow for customizing local image lookups.
+type LookupImageOptions struct {
+	// Lookup an image matching the specified architecture.
+	Architecture string
+	// Lookup an image matching the specified OS.
+	OS string
+	// Lookup an image matching the specified variant.
+	Variant string
+
+	// If set, do not look for items/instances in the manifest list that
+	// match the current platform but return the manifest list as is.
+	// Only check for a manifest list; return ErrNotAManifestList if none
+	// is found.
+	lookupManifest bool
+
+	// If the matching image resolves to a manifest list, return the
+	// manifest list instead of resolving to an image instance; if no
+	// manifest list is found, try resolving the image.
+	ManifestList bool
+
+	// If the image resolves to a manifest list, we usually lookup a
+	// matching instance and error if none could be found. In this case,
+	// just return the manifest list. Required for image removal.
+	returnManifestIfNoInstance bool
+}
+
+var errNoHexValue = errors.New("invalid format: no 64-byte hexadecimal value")
+
+// LookupImage looks up `name` in the local container storage. Returns the
+// image and the name it has been found with. Note that name may also use the
+// `containers-storage:` prefix used to refer to the containers-storage
+// transport.
Returns storage.ErrImageUnknown if the image could not be found. +// +// Unless specified via the options, the image will be looked up by name only +// without matching the architecture, os or variant. An exception is if the +// image resolves to a manifest list, where an instance of the manifest list +// matching the local or specified platform (via options.{Architecture,OS,Variant}) +// is returned. +// +// If the specified name uses the `containers-storage` transport, the resolved +// name is empty. +func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, string, error) { + logrus.Debugf("Looking up image %q in local containers storage", name) + + if options == nil { + options = &LookupImageOptions{} + } + + // If needed extract the name sans transport. + storageRef, err := alltransports.ParseImageName(name) + if err == nil { + if storageRef.Transport().Name() != storageTransport.Transport.Name() { + return nil, "", errors.Errorf("unsupported transport %q for looking up local images", storageRef.Transport().Name()) + } + img, err := storageTransport.Transport.GetStoreImage(r.store, storageRef) + if err != nil { + return nil, "", err + } + logrus.Debugf("Found image %q in local containers storage (%s)", name, storageRef.StringWithinTransport()) + return r.storageToImage(img, storageRef), "", nil + } else { + // Docker compat: strip off the tag iff name is tagged and digested + // (e.g., fedora:latest@sha256...). In that case, the tag is stripped + // off and entirely ignored. The digest is the sole source of truth. + normalizedName, err := normalizeTaggedDigestedString(name) + if err != nil { + return nil, "", err + } + name = normalizedName + } + + byDigest := false + originalName := name + if strings.HasPrefix(name, "sha256:") { + byDigest = true + name = strings.TrimPrefix(name, "sha256:") + } + byFullID := reference.IsFullIdentifier(name) + + if byDigest && !byFullID { + return nil, "", fmt.Errorf("%s: %v", originalName, errNoHexValue) + } + + // If the name clearly refers to a local image, try to look it up. + if byFullID || byDigest { + img, err := r.lookupImageInLocalStorage(originalName, name, options) + if err != nil { + return nil, "", err + } + if img != nil { + return img, originalName, nil + } + return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName) + } + + // Unless specified, set the platform specified in the system context + // for later platform matching. Builder likes to set these things via + // the system context at runtime creation. + if options.Architecture == "" { + options.Architecture = r.systemContext.ArchitectureChoice + } + if options.OS == "" { + options.OS = r.systemContext.OSChoice + } + if options.Variant == "" { + options.Variant = r.systemContext.VariantChoice + } + // Normalize platform to be OCI compatible (e.g., "aarch64" -> "arm64"). + options.OS, options.Architecture, options.Variant = NormalizePlatform(options.OS, options.Architecture, options.Variant) + + // Second, try out the candidates as resolved by shortnames. This takes + // "localhost/" prefixed images into account as well. + candidates, err := shortnames.ResolveLocally(&r.systemContext, name) + if err != nil { + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + } + // Backwards compat: normalize to docker.io as some users may very well + // rely on that. 
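+	// For example (illustrative): "alpine" is additionally tried as
+	// "docker.io/library/alpine:latest".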
+ if dockerNamed, err := reference.ParseDockerRef(name); err == nil { + candidates = append(candidates, dockerNamed) + } + + for _, candidate := range candidates { + img, err := r.lookupImageInLocalStorage(name, candidate.String(), options) + if err != nil { + return nil, "", err + } + if img != nil { + return img, candidate.String(), err + } + } + + // The specified name may refer to a short ID. Note that this *must* + // happen after the short-name expansion as done above. + img, err := r.lookupImageInLocalStorage(name, name, options) + if err != nil { + return nil, "", err + } + if img != nil { + return img, name, err + } + + return r.lookupImageInDigestsAndRepoTags(name, options) +} + +// lookupImageInLocalStorage looks up the specified candidate for name in the +// storage and checks whether it's matching the system context. +func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *LookupImageOptions) (*Image, error) { + logrus.Debugf("Trying %q ...", candidate) + img, err := r.store.Image(candidate) + if err != nil && errors.Cause(err) != storage.ErrImageUnknown { + return nil, err + } + if img == nil { + return nil, nil + } + ref, err := storageTransport.Transport.ParseStoreReference(r.store, img.ID) + if err != nil { + return nil, err + } + + image := r.storageToImage(img, ref) + logrus.Debugf("Found image %q as %q in local containers storage", name, candidate) + + // If we referenced a manifest list, we need to check whether we can + // find a matching instance in the local containers storage. + isManifestList, err := image.IsManifestList(context.Background()) + if err != nil { + if errors.Cause(err) == os.ErrNotExist { + // We must be tolerant toward corrupted images. + // See containers/podman commit fd9dd7065d44. + logrus.Warnf("Failed to determine if an image is a manifest list: %v, ignoring the error", err) + return image, nil + } + return nil, err + } + if options.lookupManifest || options.ManifestList { + if isManifestList { + return image, nil + } + // return ErrNotAManifestList if lookupManifest is set otherwise try resolving image. + if options.lookupManifest { + return nil, errors.Wrapf(ErrNotAManifestList, candidate) + } + } + + if isManifestList { + logrus.Debugf("Candidate %q is a manifest list, looking up matching instance", candidate) + manifestList, err := image.ToManifestList() + if err != nil { + return nil, err + } + instance, err := manifestList.LookupInstance(context.Background(), options.Architecture, options.OS, options.Variant) + if err != nil { + if options.returnManifestIfNoInstance { + logrus.Debug("No matching instance was found: returning manifest list instead") + return image, nil + } + return nil, errors.Wrap(storage.ErrImageUnknown, err.Error()) + } + ref, err = storageTransport.Transport.ParseStoreReference(r.store, "@"+instance.ID()) + if err != nil { + return nil, err + } + image = instance + } + + matches, err := r.imageReferenceMatchesContext(ref, options) + if err != nil { + return nil, err + } + + // NOTE: if the user referenced by ID we must optimistically assume + // that they know what they're doing. Given, we already did the + // manifest limbo above, we may already have resolved it. + if !matches && !strings.HasPrefix(image.ID(), candidate) { + return nil, nil + } + // Also print the string within the storage transport. That may aid in + // debugging when using additional stores since we see explicitly where + // the store is and which driver (options) are used. 
+ logrus.Debugf("Found image %q as %q in local containers storage (%s)", name, candidate, ref.StringWithinTransport()) + return image, nil +} + +// lookupImageInDigestsAndRepoTags attempts to match name against any image in +// the local containers storage. If name is digested, it will be compared +// against image digests. Otherwise, it will be looked up in the repo tags. +func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupImageOptions) (*Image, string, error) { + // Until now, we've tried very hard to find an image but now it is time + // for limbo. If the image includes a digest that we couldn't detect + // verbatim in the storage, we must have a look at all digests of all + // images. Those may change over time (e.g., via manifest lists). + // Both Podman and Buildah want us to do that dance. + allImages, err := r.ListImages(context.Background(), nil, nil) + if err != nil { + return nil, "", err + } + + ref, err := reference.Parse(name) // Warning! This is not ParseNormalizedNamed + if err != nil { + return nil, "", err + } + named, isNamed := ref.(reference.Named) + if !isNamed { + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + } + + digested, isDigested := named.(reference.Digested) + if isDigested { + logrus.Debug("Looking for image with matching recorded digests") + digest := digested.Digest() + for _, image := range allImages { + for _, d := range image.Digests() { + if d != digest { + continue + } + // Also make sure that the matching image fits all criteria (e.g., manifest list). + if _, err := r.lookupImageInLocalStorage(name, image.ID(), options); err != nil { + return nil, "", err + } + return image, name, nil + + } + } + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + } + + if !shortnames.IsShortName(name) { + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + } + + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed + namedTagged, isNammedTagged := named.(reference.NamedTagged) + if !isNammedTagged { + // NOTE: this should never happen since we already know it's + // not a digested reference. + return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", name, storage.ErrImageUnknown) + } + + for _, image := range allImages { + named, err := image.inRepoTags(namedTagged) + if err != nil { + return nil, "", err + } + if named == nil { + continue + } + img, err := r.lookupImageInLocalStorage(name, named.String(), options) + if err != nil { + return nil, "", err + } + if img != nil { + return img, named.String(), err + } + } + + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) +} + +// ResolveName resolves the specified name. If the name resolves to a local +// image, the fully resolved name will be returned. Otherwise, the name will +// be properly normalized. +// +// Note that an empty string is returned as is. +func (r *Runtime) ResolveName(name string) (string, error) { + if name == "" { + return "", nil + } + image, resolvedName, err := r.LookupImage(name, nil) + if err != nil && errors.Cause(err) != storage.ErrImageUnknown { + return "", err + } + + if image != nil && !strings.HasPrefix(image.ID(), resolvedName) { + return resolvedName, err + } + + normalized, err := NormalizeName(name) + if err != nil { + return "", err + } + + return normalized.String(), nil +} + +// imageReferenceMatchesContext return true if the specified reference matches +// the platform (os, arch, variant) as specified by the lookup options. 
+func (r *Runtime) imageReferenceMatchesContext(ref types.ImageReference, options *LookupImageOptions) (bool, error) { + if options.Architecture+options.OS+options.Variant == "" { + return true, nil + } + + ctx := context.Background() + img, err := ref.NewImage(ctx, &r.systemContext) + if err != nil { + return false, err + } + defer img.Close() + data, err := img.Inspect(ctx) + if err != nil { + return false, err + } + + if options.Architecture != "" && options.Architecture != data.Architecture { + logrus.Debugf("architecture %q does not match architecture %q of image %s", options.Architecture, data.Architecture, ref) + return false, nil + } + if options.OS != "" && options.OS != data.Os { + logrus.Debugf("OS %q does not match OS %q of image %s", options.OS, data.Os, ref) + return false, nil + } + if options.Variant != "" && options.Variant != data.Variant { + logrus.Debugf("variant %q does not match variant %q of image %s", options.Variant, data.Variant, ref) + return false, nil + } + + return true, nil +} + +// IsExternalContainerFunc allows for checking whether the specified container +// is an external one. The definition of an external container can be set by +// callers. +type IsExternalContainerFunc func(containerID string) (bool, error) + +// ListImagesOptions allow for customizing listing images. +type ListImagesOptions struct { + // Filters to filter the listed images. Supported filters are + // * after,before,since=image + // * containers=true,false,external + // * dangling=true,false + // * intermediate=true,false (useful for pruning images) + // * id=id + // * label=key[=value] + // * readonly=true,false + // * reference=name[:tag] (wildcards allowed) + Filters []string + // IsExternalContainerFunc allows for checking whether the specified + // container is an external one (when containers=external filter is + // used). The definition of an external container can be set by + // callers. + IsExternalContainerFunc IsExternalContainerFunc +} + +// ListImages lists images in the local container storage. If names are +// specified, only images with the specified names are looked up and filtered. +func (r *Runtime) ListImages(ctx context.Context, names []string, options *ListImagesOptions) ([]*Image, error) { + if options == nil { + options = &ListImagesOptions{} + } + + var images []*Image + if len(names) > 0 { + for _, name := range names { + image, _, err := r.LookupImage(name, nil) + if err != nil { + return nil, err + } + images = append(images, image) + } + } else { + storageImages, err := r.store.Images() + if err != nil { + return nil, err + } + for i := range storageImages { + images = append(images, r.storageToImage(&storageImages[i], nil)) + } + } + + return r.filterImages(ctx, images, options) +} + +// RemoveImagesOptions allow for customizing image removal. +type RemoveImagesOptions struct { + // Force will remove all containers from the local storage that are + // using a removed image. Use RemoveContainerFunc for a custom logic. + // If set, all child images will be removed as well. + Force bool + // LookupManifest will expect all specified names to be manifest lists (no instance look up). + // This allows for removing manifest lists. + // By default, RemoveImages will attempt to resolve to a manifest instance matching + // the local platform (i.e., os, architecture, variant). + LookupManifest bool + // RemoveContainerFunc allows for a custom logic for removing + // containers using a specific image. 
By default, all containers in + // the local containers storage will be removed (if Force is set). + RemoveContainerFunc RemoveContainerFunc + // Ignore if a specified image does not exist and do not throw an error. + Ignore bool + // IsExternalContainerFunc allows for checking whether the specified + // container is an external one (when containers=external filter is + // used). The definition of an external container can be set by + // callers. + IsExternalContainerFunc IsExternalContainerFunc + // Remove external containers even when Force is false. Requires + // IsExternalContainerFunc to be specified. + ExternalContainers bool + // Filters to filter the removed images. Supported filters are + // * after,before,since=image + // * containers=true,false,external + // * dangling=true,false + // * intermediate=true,false (useful for pruning images) + // * id=id + // * label=key[=value] + // * readonly=true,false + // * reference=name[:tag] (wildcards allowed) + Filters []string + // The RemoveImagesReport will include the size of the removed image. + // This information may be useful when pruning images to figure out how + // much space was freed. However, computing the size of an image is + // comparatively expensive, so it is made optional. + WithSize bool +} + +// RemoveImages removes images specified by names. If no names are specified, +// remove images as specified via the options' filters. All images are +// expected to exist in the local containers storage. +// +// If an image has more names than one name, the image will be untagged with +// the specified name. RemoveImages returns a slice of untagged and removed +// images. +// +// Note that most errors are non-fatal and collected into `rmErrors` return +// value. +func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *RemoveImagesOptions) (reports []*RemoveImageReport, rmErrors []error) { + if options == nil { + options = &RemoveImagesOptions{} + } + + if options.ExternalContainers && options.IsExternalContainerFunc == nil { + return nil, []error{fmt.Errorf("libimage error: cannot remove external containers without callback")} + } + + // The logic here may require some explanation. Image removal is + // surprisingly complex since it is recursive (intermediate parents are + // removed) and since multiple items in `names` may resolve to the + // *same* image. On top, the data in the containers storage is shared, + // so we need to be careful and the code must be robust. That is why + // users can only remove images via this function; the logic may be + // complex but the execution path is clear. + + // Bundle an image with a possible empty slice of names to untag. That + // allows for a decent untagging logic and to bundle multiple + // references to the same *Image (and circumvent consistency issues). + type deleteMe struct { + image *Image + referencedBy []string + } + + appendError := func(err error) { + rmErrors = append(rmErrors, err) + } + + deleteMap := make(map[string]*deleteMe) // ID -> deleteMe + toDelete := []string{} + // Look up images in the local containers storage and fill out + // toDelete and the deleteMap. + + switch { + case len(names) > 0: + // prepare lookupOptions + var lookupOptions *LookupImageOptions + if options.LookupManifest { + // LookupManifest configured as true make sure we only remove manifests and no referenced images. 
+ lookupOptions = &LookupImageOptions{lookupManifest: true} + } else { + lookupOptions = &LookupImageOptions{returnManifestIfNoInstance: true} + } + // Look up the images one-by-one. That allows for removing + // images that have been looked up successfully while reporting + // lookup errors at the end. + for _, name := range names { + img, resolvedName, err := r.LookupImage(name, lookupOptions) + if err != nil { + if options.Ignore && errors.Is(err, storage.ErrImageUnknown) { + continue + } + appendError(err) + continue + } + dm, exists := deleteMap[img.ID()] + if !exists { + toDelete = append(toDelete, img.ID()) + dm = &deleteMe{image: img} + deleteMap[img.ID()] = dm + } + dm.referencedBy = append(dm.referencedBy, resolvedName) + } + + default: + options := &ListImagesOptions{ + IsExternalContainerFunc: options.IsExternalContainerFunc, + Filters: options.Filters, + } + filteredImages, err := r.ListImages(ctx, nil, options) + if err != nil { + appendError(err) + return nil, rmErrors + } + for _, img := range filteredImages { + toDelete = append(toDelete, img.ID()) + deleteMap[img.ID()] = &deleteMe{image: img} + } + } + + // Return early if there's no image to delete. + if len(deleteMap) == 0 { + return nil, rmErrors + } + + // Now remove the images in the given order. + rmMap := make(map[string]*RemoveImageReport) + orderedIDs := []string{} + visitedIDs := make(map[string]bool) + for _, id := range toDelete { + del, exists := deleteMap[id] + if !exists { + appendError(errors.Errorf("internal error: ID %s not in found in image-deletion map", id)) + continue + } + if len(del.referencedBy) == 0 { + del.referencedBy = []string{""} + } + for _, ref := range del.referencedBy { + processedIDs, err := del.image.remove(ctx, rmMap, ref, options) + if err != nil { + appendError(err) + } + // NOTE: make sure to add given ID only once to orderedIDs. + for _, id := range processedIDs { + if visited := visitedIDs[id]; visited { + continue + } + orderedIDs = append(orderedIDs, id) + visitedIDs[id] = true + } + } + } + + // Finally, we can assemble the reports slice. + for _, id := range orderedIDs { + report, exists := rmMap[id] + if exists { + reports = append(reports, report) + } + } + + return reports, rmErrors +} diff --git a/vendor/github.com/containers/common/libimage/save.go b/vendor/github.com/containers/common/libimage/save.go new file mode 100644 index 00000000000..fed86d4efce --- /dev/null +++ b/vendor/github.com/containers/common/libimage/save.go @@ -0,0 +1,226 @@ +package libimage + +import ( + "context" + "strings" + "time" + + dirTransport "github.com/containers/image/v5/directory" + dockerArchiveTransport "github.com/containers/image/v5/docker/archive" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + ociArchiveTransport "github.com/containers/image/v5/oci/archive" + ociTransport "github.com/containers/image/v5/oci/layout" + "github.com/containers/image/v5/types" + ociv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// SaveOptions allow for customizing saving images. +type SaveOptions struct { + CopyOptions + + // AdditionalTags for the saved image. Incompatible when saving + // multiple images. + AdditionalTags []string +} + +// Save saves one or more images indicated by `names` in the specified `format` +// to `path`. Supported formats are oci-archive, docker-archive, oci-dir and +// docker-dir. 
The latter two adhere to the dir transport in the corresponding
+// oci or docker v2s2 format. Please note that only docker-archive supports
+// saving more than one image. Other formats will yield an error when
+// attempting to save more than one.
+func (r *Runtime) Save(ctx context.Context, names []string, format, path string, options *SaveOptions) error {
+	logrus.Debugf("Saving one or more images (%s) to %q", names, path)
+
+	if options == nil {
+		options = &SaveOptions{}
+	}
+
+	// First some sanity checks to simplify subsequent code.
+	switch len(names) {
+	case 0:
+		return errors.New("no image specified for saving images")
+	case 1:
+		// All formats support saving a single image.
+	default:
+		if format != "docker-archive" {
+			return errors.Errorf("unsupported format %q for saving multiple images (only docker-archive)", format)
+		}
+		if len(options.AdditionalTags) > 0 {
+			return errors.Errorf("cannot save multiple images with multiple tags")
+		}
+	}
+
+	// Dispatch the save operations.
+	switch format {
+	case "oci-archive", "oci-dir", "docker-dir":
+		if len(names) > 1 {
+			return errors.Errorf("%q does not support saving multiple images (%v)", format, names)
+		}
+		return r.saveSingleImage(ctx, names[0], format, path, options)
+
+	case "docker-archive":
+		options.ManifestMIMEType = manifest.DockerV2Schema2MediaType
+		return r.saveDockerArchive(ctx, names, path, options)
+	}
+
+	return errors.Errorf("unsupported format %q for saving images", format)
+}
+
+// saveSingleImage saves the specified image name to the specified path.
+// Supported formats are "oci-archive", "oci-dir" and "docker-dir".
+func (r *Runtime) saveSingleImage(ctx context.Context, name, format, path string, options *SaveOptions) error {
+	image, imageName, err := r.LookupImage(name, nil)
+	if err != nil {
+		return err
+	}
+
+	if r.eventChannel != nil {
+		defer r.writeEvent(&Event{ID: image.ID(), Name: path, Time: time.Now(), Type: EventTypeImageSave})
+	}
+
+	// Unless the image was referenced by ID, use the resolved name as a
+	// tag.
+	var tag string
+	if !strings.HasPrefix(image.ID(), imageName) {
+		tag = imageName
+	}
+
+	srcRef, err := image.StorageReference()
+	if err != nil {
+		return err
+	}
+
+	// Prepare the destination reference.
+	var destRef types.ImageReference
+	switch format {
+	case "oci-archive":
+		destRef, err = ociArchiveTransport.NewReference(path, tag)
+
+	case "oci-dir":
+		destRef, err = ociTransport.NewReference(path, tag)
+		options.ManifestMIMEType = ociv1.MediaTypeImageManifest
+
+	case "docker-dir":
+		destRef, err = dirTransport.NewReference(path)
+		options.ManifestMIMEType = manifest.DockerV2Schema2MediaType
+
+	default:
+		return errors.Errorf("unsupported format %q for saving images", format)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	c, err := r.newCopier(&options.CopyOptions)
+	if err != nil {
+		return err
+	}
+	defer c.close()
+
+	_, err = c.copy(ctx, srcRef, destRef)
+	return err
+}
+
+// saveDockerArchive saves the images specified by names to the given path.
+// It loads all images from the local containers storage and assembles the
+// metadata needed to properly save images. Since multiple names could refer
+// to the *same* image, we need to dance a bit and store additional "names".
+// Those can then be used as additional tags when copying.
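A usage-level sketch (not part of the vendored file) may help here: it assumes an already-initialized `*libimage.Runtime` named `rt`, and the image names and paths are invented. The `saveDockerArchive` helper described in the comment above follows after the sketch.

```go
package main

import (
	"context"

	"github.com/containers/common/libimage"
)

// saveExamples sketches both Save paths: any format accepts a single
// image, while only docker-archive accepts several at once.
func saveExamples(ctx context.Context, rt *libimage.Runtime) error {
	if err := rt.Save(ctx, []string{"alpine:latest"}, "oci-archive", "/tmp/alpine-oci.tar", nil); err != nil {
		return err
	}
	return rt.Save(ctx, []string{"alpine:latest", "busybox:latest"}, "docker-archive", "/tmp/images.tar", &libimage.SaveOptions{})
}
```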
+func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path string, options *SaveOptions) error { + type localImage struct { + image *Image + tags []reference.NamedTagged + } + + additionalTags := []reference.NamedTagged{} + for _, tag := range options.AdditionalTags { + named, err := NormalizeName(tag) + if err == nil { + tagged, withTag := named.(reference.NamedTagged) + if !withTag { + return errors.Errorf("invalid additional tag %q: normalized to untagged %q", tag, named.String()) + } + additionalTags = append(additionalTags, tagged) + } + } + + orderedIDs := []string{} // to preserve the relative order + localImages := make(map[string]*localImage) // to assemble tags + visitedNames := make(map[string]bool) // filters duplicate names + for _, name := range names { + // Look up local images. + image, imageName, err := r.LookupImage(name, nil) + if err != nil { + return err + } + // Make sure to filter duplicates purely based on the resolved + // name. + if _, exists := visitedNames[imageName]; exists { + continue + } + visitedNames[imageName] = true + // Extract and assemble the data. + local, exists := localImages[image.ID()] + if !exists { + local = &localImage{image: image} + local.tags = additionalTags + orderedIDs = append(orderedIDs, image.ID()) + } + // Add the tag if the locally resolved name is properly tagged + // (which it should unless we looked it up by ID). + named, err := reference.ParseNamed(imageName) + if err == nil { + tagged, withTag := named.(reference.NamedTagged) + if withTag { + local.tags = append(local.tags, tagged) + } + } + localImages[image.ID()] = local + if r.eventChannel != nil { + defer r.writeEvent(&Event{ID: image.ID(), Name: path, Time: time.Now(), Type: EventTypeImageSave}) + } + } + + writer, err := dockerArchiveTransport.NewWriter(r.systemContextCopy(), path) + if err != nil { + return err + } + defer writer.Close() + + for _, id := range orderedIDs { + local, exists := localImages[id] + if !exists { + return errors.Errorf("internal error: saveDockerArchive: ID %s not found in local map", id) + } + + copyOpts := options.CopyOptions + copyOpts.dockerArchiveAdditionalTags = local.tags + + c, err := r.newCopier(©Opts) + if err != nil { + return err + } + defer c.close() + + destRef, err := writer.NewReference(nil) + if err != nil { + return err + } + + srcRef, err := local.image.StorageReference() + if err != nil { + return err + } + + if _, err := c.copy(ctx, srcRef, destRef); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go new file mode 100644 index 00000000000..33a4776ce3f --- /dev/null +++ b/vendor/github.com/containers/common/libimage/search.go @@ -0,0 +1,324 @@ +package libimage + +import ( + "context" + "fmt" + "strconv" + "strings" + "sync" + + registryTransport "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/transports/alltransports" + "github.com/containers/image/v5/types" + "github.com/hashicorp/go-multierror" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/semaphore" +) + +const ( + searchTruncLength = 44 + searchMaxQueries = 25 + // Let's follow Firefox by limiting parallel downloads to 6. We do the + // same when pulling images in c/image. + searchMaxParallel = int64(6) +) + +// SearchResult is holding image-search related data. 
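Before the types below, a minimal caller-side sketch of the search API (again assuming a configured `*libimage.Runtime` named `rt`); the `SearchResult` type described just above is defined after this sketch.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
	"github.com/containers/image/v5/types"
)

// searchExample queries the configured registries for "alpine",
// keeping at most ten official images per index.
func searchExample(ctx context.Context, rt *libimage.Runtime) error {
	results, err := rt.Search(ctx, "alpine", &libimage.SearchOptions{
		Limit:  10,
		Filter: libimage.SearchFilter{IsOfficial: types.OptionalBoolTrue},
	})
	if err != nil {
		return err
	}
	for _, r := range results {
		fmt.Println(r.Index, r.Name, r.Stars)
	}
	return nil
}
```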
+type SearchResult struct { + // Index is the image index (e.g., "docker.io" or "quay.io") + Index string + // Name is the canonical name of the image (e.g., "docker.io/library/alpine"). + Name string + // Description of the image. + Description string + // Stars is the number of stars of the image. + Stars int + // Official indicates if it's an official image. + Official string + // Automated indicates if the image was created by an automated build. + Automated string + // Tag is the image tag + Tag string +} + +// SearchOptions customize searching images. +type SearchOptions struct { + // Filter allows to filter the results. + Filter SearchFilter + // Limit limits the number of queries per index (default: 25). Must be + // greater than 0 to overwrite the default value. + Limit int + // NoTrunc avoids the output to be truncated. + NoTrunc bool + // Authfile is the path to the authentication file. + Authfile string + // InsecureSkipTLSVerify allows to skip TLS verification. + InsecureSkipTLSVerify types.OptionalBool + // ListTags returns the search result with available tags + ListTags bool + // Registries to search if the specified term does not include a + // registry. If set, the unqualified-search registries in + // containers-registries.conf(5) are ignored. + Registries []string +} + +// SearchFilter allows filtering images while searching. +type SearchFilter struct { + // Stars describes the minimal amount of starts of an image. + Stars int + // IsAutomated decides if only images from automated builds are displayed. + IsAutomated types.OptionalBool + // IsOfficial decides if only official images are displayed. + IsOfficial types.OptionalBool +} + +// ParseSearchFilter turns the filter into a SearchFilter that can be used for +// searching images. +func ParseSearchFilter(filter []string) (*SearchFilter, error) { + sFilter := new(SearchFilter) + for _, f := range filter { + arr := strings.SplitN(f, "=", 2) + switch arr[0] { + case "stars": + if len(arr) < 2 { + return nil, errors.Errorf("invalid `stars` filter %q, should be stars=", filter) + } + stars, err := strconv.Atoi(arr[1]) + if err != nil { + return nil, errors.Wrapf(err, "incorrect value type for stars filter") + } + sFilter.Stars = stars + case "is-automated": + if len(arr) == 2 && arr[1] == "false" { + sFilter.IsAutomated = types.OptionalBoolFalse + } else { + sFilter.IsAutomated = types.OptionalBoolTrue + } + case "is-official": + if len(arr) == 2 && arr[1] == "false" { + sFilter.IsOfficial = types.OptionalBoolFalse + } else { + sFilter.IsOfficial = types.OptionalBoolTrue + } + default: + return nil, errors.Errorf("invalid filter type %q", f) + } + } + return sFilter, nil +} + +// Search searches term. If term includes a registry, only this registry will +// be used for searching. Otherwise, the unqualified-search registries in +// containers-registries.conf(5) or the ones specified in the options will be +// used. +func (r *Runtime) Search(ctx context.Context, term string, options *SearchOptions) ([]SearchResult, error) { + if options == nil { + options = &SearchOptions{} + } + + var searchRegistries []string + + // Try to extract a registry from the specified search term. We + // consider everything before the first slash to be the registry. Note + // that we cannot use the reference parser from the containers/image + // library as the search term may container arbitrary input such as + // wildcards. See bugzilla.redhat.com/show_bug.cgi?id=1846629. 
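+	// For example, a term of "quay.io/alpine" searches only "quay.io" for
+	// "alpine", whereas a bare "alpine" falls back to the registries from
+	// the options or from containers-registries.conf(5).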
+ spl := strings.SplitN(term, "/", 2) + switch { + case len(spl) > 1: + searchRegistries = []string{spl[0]} + term = spl[1] + case len(options.Registries) > 0: + searchRegistries = options.Registries + default: + regs, err := sysregistriesv2.UnqualifiedSearchRegistries(r.systemContextCopy()) + if err != nil { + return nil, err + } + searchRegistries = regs + } + + logrus.Debugf("Searching images matching term %s at the following registries %s", term, searchRegistries) + + // searchOutputData is used as a return value for searching in parallel. + type searchOutputData struct { + data []SearchResult + err error + } + + sem := semaphore.NewWeighted(searchMaxParallel) + wg := sync.WaitGroup{} + wg.Add(len(searchRegistries)) + data := make([]searchOutputData, len(searchRegistries)) + + for i := range searchRegistries { + if err := sem.Acquire(ctx, 1); err != nil { + return nil, err + } + index := i + go func() { + defer sem.Release(1) + defer wg.Done() + searchOutput, err := r.searchImageInRegistry(ctx, term, searchRegistries[index], options) + data[index] = searchOutputData{data: searchOutput, err: err} + }() + } + + wg.Wait() + results := []SearchResult{} + var multiErr error + for _, d := range data { + if d.err != nil { + multiErr = multierror.Append(multiErr, d.err) + continue + } + results = append(results, d.data...) + } + + // Optimistically assume that one successfully searched registry + // includes what the user is looking for. + if len(results) > 0 { + return results, nil + } + return results, multiErr +} + +func (r *Runtime) searchImageInRegistry(ctx context.Context, term, registry string, options *SearchOptions) ([]SearchResult, error) { + // Max number of queries by default is 25 + limit := searchMaxQueries + if options.Limit > 0 { + limit = options.Limit + } + + sys := r.systemContextCopy() + if options.InsecureSkipTLSVerify != types.OptionalBoolUndefined { + sys.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify + } + + if options.Authfile != "" { + sys.AuthFilePath = options.Authfile + } + + if options.ListTags { + results, err := searchRepositoryTags(ctx, sys, registry, term, options) + if err != nil { + return []SearchResult{}, err + } + return results, nil + } + + results, err := registryTransport.SearchRegistry(ctx, sys, registry, term, limit) + if err != nil { + return []SearchResult{}, err + } + index := registry + arr := strings.Split(registry, ".") + if len(arr) > 2 { + index = strings.Join(arr[len(arr)-2:], ".") + } + + // limit is the number of results to output + // if the total number of results is less than the limit, output all + // if the limit has been set by the user, output those number of queries + limit = searchMaxQueries + if len(results) < limit { + limit = len(results) + } + if options.Limit != 0 { + limit = len(results) + if options.Limit < len(results) { + limit = options.Limit + } + } + + paramsArr := []SearchResult{} + for i := 0; i < limit; i++ { + // Check whether query matches filters + if !(options.Filter.matchesAutomatedFilter(results[i]) && options.Filter.matchesOfficialFilter(results[i]) && options.Filter.matchesStarFilter(results[i])) { + continue + } + official := "" + if results[i].IsOfficial { + official = "[OK]" + } + automated := "" + if results[i].IsAutomated { + automated = "[OK]" + } + description := strings.ReplaceAll(results[i].Description, "\n", " ") + if len(description) > 44 && !options.NoTrunc { + description = description[:searchTruncLength] + "..." 
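+			// NOTE: the literal 44 in the condition above mirrors
+			// searchTruncLength.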
+		}
+		name := registry + "/" + results[i].Name
+		if index == "docker.io" && !strings.Contains(results[i].Name, "/") {
+			name = index + "/library/" + results[i].Name
+		}
+		params := SearchResult{
+			Index:       registry,
+			Name:        name,
+			Description: description,
+			Official:    official,
+			Automated:   automated,
+			Stars:       results[i].StarCount,
+		}
+		paramsArr = append(paramsArr, params)
+	}
+	return paramsArr, nil
+}
+
+func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registry, term string, options *SearchOptions) ([]SearchResult, error) {
+	dockerPrefix := "docker://"
+	imageRef, err := alltransports.ParseImageName(fmt.Sprintf("%s/%s", registry, term))
+	if err == nil && imageRef.Transport().Name() != registryTransport.Transport.Name() {
+		return nil, errors.Errorf("reference %q must be a docker reference", term)
+	} else if err != nil {
+		imageRef, err = alltransports.ParseImageName(fmt.Sprintf("%s%s", dockerPrefix, fmt.Sprintf("%s/%s", registry, term)))
+		if err != nil {
+			return nil, errors.Errorf("reference %q must be a docker reference", term)
+		}
+	}
+	tags, err := registryTransport.GetRepositoryTags(ctx, sys, imageRef)
+	if err != nil {
+		return nil, errors.Errorf("error getting repository tags: %v", err)
+	}
+	limit := searchMaxQueries
+	if len(tags) < limit {
+		limit = len(tags)
+	}
+	if options.Limit != 0 {
+		limit = len(tags)
+		if options.Limit < limit {
+			limit = options.Limit
+		}
+	}
+	paramsArr := []SearchResult{}
+	for i := 0; i < limit; i++ {
+		params := SearchResult{
+			Name:  imageRef.DockerReference().Name(),
+			Tag:   tags[i],
+			Index: registry,
+		}
+		paramsArr = append(paramsArr, params)
+	}
+	return paramsArr, nil
+}
+
+func (f *SearchFilter) matchesStarFilter(result registryTransport.SearchResult) bool {
+	return result.StarCount >= f.Stars
+}
+
+func (f *SearchFilter) matchesAutomatedFilter(result registryTransport.SearchResult) bool {
+	if f.IsAutomated != types.OptionalBoolUndefined {
+		return result.IsAutomated == (f.IsAutomated == types.OptionalBoolTrue)
+	}
+	return true
+}
+
+func (f *SearchFilter) matchesOfficialFilter(result registryTransport.SearchResult) bool {
+	if f.IsOfficial != types.OptionalBoolUndefined {
+		return result.IsOfficial == (f.IsOfficial == types.OptionalBoolTrue)
+	}
+	return true
+}
diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go
new file mode 100644
index 00000000000..a1e4d71edd8
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/types/const.go
@@ -0,0 +1,50 @@
+package types
+
+const (
+	// BridgeNetworkDriver defines the bridge driver
+	BridgeNetworkDriver = "bridge"
+	// DefaultNetworkDriver is the default network type used
+	DefaultNetworkDriver = BridgeNetworkDriver
+	// MacVLANNetworkDriver defines the macvlan driver
+	MacVLANNetworkDriver = "macvlan"
+	// IPVLANNetworkDriver defines the ipvlan driver
+	IPVLANNetworkDriver = "ipvlan"
+
+	// IPAM drivers
+	Driver = "driver"
+	// HostLocalIPAMDriver stores the ip locally in a db
+	HostLocalIPAMDriver = "host-local"
+	// DHCPIPAMDriver gets subnet and ip from a dhcp server
+	DHCPIPAMDriver = "dhcp"
+	// NoneIPAMDriver does not provide ipam management
+	NoneIPAMDriver = "none"
+
+	// DefaultNetworkName is the name that will be used for the default CNI network.
+	DefaultNetworkName = "podman"
+	// DefaultSubnet is the subnet that will be used for the default CNI network.
+ DefaultSubnet = "10.88.0.0/16" + + // valid macvlan driver mode values + MacVLANModeBridge = "bridge" + MacVLANModePrivate = "private" + MacVLANModeVepa = "vepa" + MacVLANModePassthru = "passthru" + + // valid ipvlan driver modes + IPVLANModeL2 = "l2" + IPVLANModeL3 = "l3" + IPVLANModeL3s = "l3s" +) + +type NetworkBackend string + +const ( + CNI NetworkBackend = "cni" + Netavark NetworkBackend = "netavark" +) + +// ValidMacVLANModes is the list of valid mode options for the macvlan driver +var ValidMacVLANModes = []string{MacVLANModeBridge, MacVLANModePrivate, MacVLANModeVepa, MacVLANModePassthru} + +// ValidIPVLANModes is the list of valid mode options for the ipvlan driver +var ValidIPVLANModes = []string{IPVLANModeL2, IPVLANModeL3, IPVLANModeL3s} diff --git a/vendor/github.com/containers/common/libnetwork/types/define.go b/vendor/github.com/containers/common/libnetwork/types/define.go new file mode 100644 index 00000000000..d37e529dfe6 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/types/define.go @@ -0,0 +1,25 @@ +package types + +import ( + "regexp" + + "github.com/pkg/errors" +) + +var ( + // ErrNoSuchNetwork indicates the requested network does not exist + ErrNoSuchNetwork = errors.New("network not found") + + // ErrInvalidArg indicates that an invalid argument was passed + ErrInvalidArg = errors.New("invalid argument") + + // ErrNetworkExists indicates that a network with the given name already + // exists. + ErrNetworkExists = errors.New("network already exists") + + // NameRegex is a regular expression to validate names. + // This must NOT be changed. + NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") + // RegexError is thrown in presence of an invalid name. + RegexError = errors.Wrapf(ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*") +) diff --git a/vendor/github.com/containers/common/libnetwork/types/network.go b/vendor/github.com/containers/common/libnetwork/types/network.go new file mode 100644 index 00000000000..de865537738 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/types/network.go @@ -0,0 +1,282 @@ +package types + +import ( + "encoding/json" + "net" + "time" +) + +type ContainerNetwork interface { + // NetworkCreate will take a partial filled Network and fill the + // missing fields. It creates the Network and returns the full Network. + NetworkCreate(Network) (Network, error) + // NetworkRemove will remove the Network with the given name or ID. + NetworkRemove(nameOrID string) error + // NetworkList will return all known Networks. Optionally you can + // supply a list of filter functions. Only if a network matches all + // functions it is returned. + NetworkList(...FilterFunc) ([]Network, error) + // NetworkInspect will return the Network with the given name or ID. + NetworkInspect(nameOrID string) (Network, error) + + // Setup will setup the container network namespace. It returns + // a map of StatusBlocks, the key is the network name. + Setup(namespacePath string, options SetupOptions) (map[string]StatusBlock, error) + // Teardown will teardown the container network namespace. + Teardown(namespacePath string, options TeardownOptions) error + + // Drivers will return the list of supported network drivers + // for this interface. + Drivers() []string + + // DefaultNetworkName will return the default network name + // for this interface. + DefaultNetworkName() string +} + +// Network describes the Network attributes. +type Network struct { + // Name of the Network. 
+ Name string `json:"name"` + // ID of the Network. + ID string `json:"id"` + // Driver for this Network, e.g. bridge, macvlan... + Driver string `json:"driver"` + // NetworkInterface is the network interface name on the host. + NetworkInterface string `json:"network_interface,omitempty"` + // Created contains the timestamp when this network was created. + Created time.Time `json:"created,omitempty"` + // Subnets to use for this network. + Subnets []Subnet `json:"subnets,omitempty"` + // IPv6Enabled if set to true an ipv6 subnet should be created for this net. + IPv6Enabled bool `json:"ipv6_enabled"` + // Internal is whether the Network should not have external routes + // to public or other Networks. + Internal bool `json:"internal"` + // DNSEnabled is whether name resolution is active for container on + // this Network. + DNSEnabled bool `json:"dns_enabled"` + // Labels is a set of key-value labels that have been applied to the + // Network. + Labels map[string]string `json:"labels,omitempty"` + // Options is a set of key-value options that have been applied to + // the Network. + Options map[string]string `json:"options,omitempty"` + // IPAMOptions contains options used for the ip assignment. + IPAMOptions map[string]string `json:"ipam_options,omitempty"` +} + +// IPNet is used as custom net.IPNet type to add Marshal/Unmarshal methods. +type IPNet struct { + net.IPNet +} + +// ParseCIDR parse a string to IPNet +func ParseCIDR(cidr string) (IPNet, error) { + ip, subnet, err := net.ParseCIDR(cidr) + if err != nil { + return IPNet{}, err + } + // convert to 4 bytes if ipv4 + ipv4 := ip.To4() + if ipv4 != nil { + ip = ipv4 + } + subnet.IP = ip + return IPNet{*subnet}, err +} + +func (n *IPNet) MarshalText() ([]byte, error) { + return []byte(n.String()), nil +} + +func (n *IPNet) UnmarshalText(text []byte) error { + subnet, err := ParseCIDR(string(text)) + if err != nil { + return err + } + *n = subnet + return nil +} + +// HardwareAddr is the same as net.HardwareAddr except +// that it adds the json marshal/unmarshal methods. +// This allows us to read the mac from a json string +// and a byte array. +// swagger:model MacAddress +type HardwareAddr net.HardwareAddr + +func (h *HardwareAddr) String() string { + return (*net.HardwareAddr)(h).String() +} + +func (h HardwareAddr) MarshalText() ([]byte, error) { + return []byte(h.String()), nil +} + +func (h *HardwareAddr) UnmarshalJSON(text []byte) error { + if len(text) == 0 { + *h = nil + return nil + } + + // if the json string start with a quote we got a string + // unmarshal the string and parse the mac from this string + if string(text[0]) == `"` { + var macString string + err := json.Unmarshal(text, &macString) + if err == nil { + mac, err := net.ParseMAC(macString) + if err == nil { + *h = HardwareAddr(mac) + return nil + } + } + } + // not a string or got an error fallback to the normal parsing + mac := make(net.HardwareAddr, 0, 6) + // use the standard json unmarshal for backwards compat + err := json.Unmarshal(text, &mac) + if err != nil { + return err + } + *h = HardwareAddr(mac) + return nil +} + +type Subnet struct { + // Subnet for this Network in CIDR form. + // swagger:strfmt string + Subnet IPNet `json:"subnet"` + // Gateway IP for this Network. + // swagger:strfmt string + Gateway net.IP `json:"gateway,omitempty"` + // LeaseRange contains the range where IP are leased. Optional. + LeaseRange *LeaseRange `json:"lease_range,omitempty"` +} + +// LeaseRange contains the range where IP are leased. 
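To make the subnet plumbing concrete, here is a brief sketch (invented names and addresses, assuming nothing beyond this package and the standard library); it uses the `LeaseRange` type defined immediately below.

```go
package main

import (
	"fmt"
	"net"

	nettypes "github.com/containers/common/libnetwork/types"
)

func main() {
	// ParseCIDR normalizes the IP to 4 bytes for IPv4 subnets.
	subnet, err := nettypes.ParseCIDR("10.89.0.0/24")
	if err != nil {
		panic(err)
	}
	nw := nettypes.Network{
		Name:   "example-net",
		Driver: nettypes.BridgeNetworkDriver,
		Subnets: []nettypes.Subnet{{
			Subnet:  subnet,
			Gateway: net.ParseIP("10.89.0.1"),
			// Constrain IP assignment to .10 through .100.
			LeaseRange: &nettypes.LeaseRange{
				StartIP: net.ParseIP("10.89.0.10"),
				EndIP:   net.ParseIP("10.89.0.100"),
			},
		}},
	}
	fmt.Println(nw.Name, nw.Subnets[0].Subnet.String())
}
```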
+type LeaseRange struct { + // StartIP first IP in the subnet which should be used to assign ips. + // swagger:strfmt string + StartIP net.IP `json:"start_ip,omitempty"` + // EndIP last IP in the subnet which should be used to assign ips. + // swagger:strfmt string + EndIP net.IP `json:"end_ip,omitempty"` +} + +// StatusBlock contains the network information about a container +// connected to one Network. +type StatusBlock struct { + // Interfaces contains the created network interface in the container. + // The map key is the interface name. + Interfaces map[string]NetInterface `json:"interfaces,omitempty"` + // DNSServerIPs nameserver addresses which should be added to + // the containers resolv.conf file. + DNSServerIPs []net.IP `json:"dns_server_ips,omitempty"` + // DNSSearchDomains search domains which should be added to + // the containers resolv.conf file. + DNSSearchDomains []string `json:"dns_search_domains,omitempty"` +} + +// NetInterface contains the settings for a given network interface. +type NetInterface struct { + // Subnets list of assigned subnets with their gateway. + Subnets []NetAddress `json:"subnets,omitempty"` + // MacAddress for this Interface. + MacAddress HardwareAddr `json:"mac_address"` +} + +// NetAddress contains the ip address, subnet and gateway. +type NetAddress struct { + // IPNet of this NetAddress. Note that this is a subnet but it has to contain the + // actual ip of the network interface and not the network address. + IPNet IPNet `json:"ipnet"` + // Gateway for the network. This can be empty if there is no gateway, e.g. internal network. + Gateway net.IP `json:"gateway,omitempty"` +} + +// PerNetworkOptions are options which should be set on a per network basis. +type PerNetworkOptions struct { + // StaticIPs for this container. Optional. + StaticIPs []net.IP `json:"static_ips,omitempty"` + // Aliases contains a list of names which the dns server should resolve + // to this container. Should only be set when DNSEnabled is true on the Network. + // If aliases are set but there is no dns support for this network the + // network interface implementation should ignore this and NOT error. + // Optional. + Aliases []string `json:"aliases,omitempty"` + // StaticMac for this container. Optional. + StaticMAC HardwareAddr `json:"static_mac,omitempty"` + // InterfaceName for this container. Required in the backend. + // Optional in the frontend. Will be filled with ethX (where X is a integer) when empty. + InterfaceName string `json:"interface_name"` +} + +// NetworkOptions for a given container. +type NetworkOptions struct { + // ContainerID is the container id, used for iptables comments and ipam allocation. + ContainerID string `json:"container_id"` + // ContainerName is the container name, used as dns name. + ContainerName string `json:"container_name"` + // PortMappings contains the port mappings for this container + PortMappings []PortMapping `json:"port_mappings,omitempty"` + // Networks contains all networks with the PerNetworkOptions. + // The map should contain at least one element. + Networks map[string]PerNetworkOptions `json:"networks"` +} + +// PortMapping is one or more ports that will be mapped into the container. +type PortMapping struct { + // HostIP is the IP that we will bind to on the host. + // If unset, assumed to be 0.0.0.0 (all interfaces). + HostIP string `json:"host_ip"` + // ContainerPort is the port number that will be exposed from the + // container. + // Mandatory. 
+ ContainerPort uint16 `json:"container_port"` + // HostPort is the port number that will be forwarded from the host into + // the container. + // If omitted, a random port on the host (guaranteed to be over 1024) + // will be assigned. + HostPort uint16 `json:"host_port"` + // Range is the number of ports that will be forwarded, starting at + // HostPort and ContainerPort and counting up. + // This is 1-indexed, so 1 is assumed to be a single port (only the + // Hostport:Containerport mapping will be added), 2 is two ports (both + // Hostport:Containerport and Hostport+1:Containerport+1), etc. + // If unset, assumed to be 1 (a single port). + // Both hostport + range and containerport + range must be less than + // 65536. + Range uint16 `json:"range"` + // Protocol is the protocol forward. + // Must be either "tcp", "udp", and "sctp", or some combination of these + // separated by commas. + // If unset, assumed to be TCP. + Protocol string `json:"protocol"` +} + +// OCICNIPortMapping maps to the standard CNI portmapping Capability. +// Deprecated: Do not use this struct for new fields. This only exists +// for backwards compatibility. +type OCICNIPortMapping struct { + // HostPort is the port number on the host. + HostPort int32 `json:"hostPort"` + // ContainerPort is the port number inside the sandbox. + ContainerPort int32 `json:"containerPort"` + // Protocol is the protocol of the port mapping. + Protocol string `json:"protocol"` + // HostIP is the host ip to use. + HostIP string `json:"hostIP"` +} + +type SetupOptions struct { + NetworkOptions +} + +type TeardownOptions struct { + NetworkOptions +} + +// FilterFunc can be passed to NetworkList to filter the networks. +type FilterFunc func(Network) bool diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor.go new file mode 100644 index 00000000000..146280df2f7 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor.go @@ -0,0 +1,22 @@ +package apparmor + +import ( + "errors" + + "github.com/containers/common/version" +) + +const ( + // ProfilePrefix is used for version-independent presence checks. + ProfilePrefix = "containers-default-" + + // Profile default name + Profile = ProfilePrefix + version.Version +) + +var ( + // ErrApparmorUnsupported indicates that AppArmor support is not supported. + ErrApparmorUnsupported = errors.New("AppArmor is not supported") + // ErrApparmorRootless indicates that AppArmor support is not supported in rootless mode. + ErrApparmorRootless = errors.New("AppArmor is not supported in rootless mode") +) diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go new file mode 100644 index 00000000000..35f79a1adb1 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go @@ -0,0 +1,301 @@ +//go:build linux && apparmor +// +build linux,apparmor + +package apparmor + +import ( + "bufio" + "bytes" + "io" + "os" + "os/exec" + "path" + "strconv" + "strings" + "text/template" + + "github.com/containers/common/pkg/apparmor/internal/supported" + "github.com/containers/storage/pkg/unshare" + runcaa "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// profileDirectory is the file store for apparmor profiles and macros. +var profileDirectory = "/etc/apparmor.d" + +// IsEnabled returns true if AppArmor is enabled on the host. 
It also checks +// for the existence of the `apparmor_parser` binary, which will be required to +// apply profiles. +func IsEnabled() bool { + return supported.NewAppArmorVerifier().IsSupported() == nil +} + +// profileData holds information about the given profile for generation. +type profileData struct { + // Name is profile name. + Name string + // Imports defines the apparmor functions to import, before defining the profile. + Imports []string + // InnerImports defines the apparmor functions to import in the profile. + InnerImports []string + // Version is the {major, minor, patch} version of apparmor_parser as a single number. + Version int +} + +// generateDefault creates an apparmor profile from ProfileData. +func (p *profileData) generateDefault(apparmorParserPath string, out io.Writer) error { + compiled, err := template.New("apparmor_profile").Parse(defaultProfileTemplate) + if err != nil { + return errors.Wrap(err, "create AppArmor profile from template") + } + + if macroExists("tunables/global") { + p.Imports = append(p.Imports, "#include ") + } else { + p.Imports = append(p.Imports, "@{PROC}=/proc/") + } + + if macroExists("abstractions/base") { + p.InnerImports = append(p.InnerImports, "#include ") + } + + ver, err := getAAParserVersion(apparmorParserPath) + if err != nil { + return errors.Wrap(err, "get AppArmor version") + } + p.Version = ver + + return errors.Wrap(compiled.Execute(out, p), "execute compiled profile") +} + +// macrosExists checks if the passed macro exists. +func macroExists(m string) bool { + _, err := os.Stat(path.Join(profileDirectory, m)) + return err == nil +} + +// InstallDefault generates a default profile and loads it into the kernel +// using 'apparmor_parser'. +func InstallDefault(name string) error { + if unshare.IsRootless() { + return ErrApparmorRootless + } + + p := profileData{ + Name: name, + } + + apparmorParserPath, err := supported.NewAppArmorVerifier().FindAppArmorParserBinary() + if err != nil { + return errors.Wrap(err, "find `apparmor_parser` binary") + } + + cmd := exec.Command(apparmorParserPath, "-Kr") + pipe, err := cmd.StdinPipe() + if err != nil { + return errors.Wrapf(err, "execute %s", apparmorParserPath) + } + if err := cmd.Start(); err != nil { + if pipeErr := pipe.Close(); pipeErr != nil { + logrus.Errorf("Unable to close AppArmor pipe: %q", pipeErr) + } + return errors.Wrapf(err, "start %s command", apparmorParserPath) + } + if err := p.generateDefault(apparmorParserPath, pipe); err != nil { + if pipeErr := pipe.Close(); pipeErr != nil { + logrus.Errorf("Unable to close AppArmor pipe: %q", pipeErr) + } + if cmdErr := cmd.Wait(); cmdErr != nil { + logrus.Errorf("Unable to wait for AppArmor command: %q", cmdErr) + } + return errors.Wrap(err, "generate default profile into pipe") + } + + if pipeErr := pipe.Close(); pipeErr != nil { + logrus.Errorf("Unable to close AppArmor pipe: %q", pipeErr) + } + + return errors.Wrap(cmd.Wait(), "wait for AppArmor command") +} + +// DefaultContent returns the default profile content as byte slice. The +// profile is named as the provided `name`. The function errors if the profile +// generation fails. 
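A caller-side sketch of `DefaultContent`, which is implemented below: rendering the default profile for inspection without loading it into the kernel. This assumes a linux, apparmor-tagged build with `apparmor_parser` installed.

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/common/pkg/apparmor"
)

func main() {
	// apparmor.Profile is the version-suffixed default profile name.
	content, err := apparmor.DefaultContent(apparmor.Profile)
	if err != nil {
		log.Fatal(err) // e.g. apparmor_parser not found
	}
	fmt.Printf("%s", content)
}
```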
+func DefaultContent(name string) ([]byte, error) { + p := profileData{Name: name} + buffer := &bytes.Buffer{} + + apparmorParserPath, err := supported.NewAppArmorVerifier().FindAppArmorParserBinary() + if err != nil { + return nil, errors.Wrap(err, "find `apparmor_parser` binary") + } + + if err := p.generateDefault(apparmorParserPath, buffer); err != nil { + return nil, errors.Wrap(err, "generate default AppAmor profile") + } + return buffer.Bytes(), nil +} + +// IsLoaded checks if a profile with the given name has been loaded into the +// kernel. +func IsLoaded(name string) (bool, error) { + if name != "" && unshare.IsRootless() { + return false, errors.Wrapf(ErrApparmorRootless, "cannot load AppArmor profile %q", name) + } + + file, err := os.Open("/sys/kernel/security/apparmor/profiles") + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, errors.Wrap(err, "open AppArmor profile path") + } + defer file.Close() + + r := bufio.NewReader(file) + for { + p, err := r.ReadString('\n') + if err == io.EOF { + break + } + if err != nil { + return false, errors.Wrap(err, "reading AppArmor profile") + } + if strings.HasPrefix(p, name+" ") { + return true, nil + } + } + + return false, nil +} + +// execAAParser runs `apparmor_parser` with the passed arguments. +func execAAParser(apparmorParserPath, dir string, args ...string) (string, error) { + c := exec.Command(apparmorParserPath, args...) + c.Dir = dir + + output, err := c.Output() + if err != nil { + return "", errors.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err) + } + + return string(output), nil +} + +// getAAParserVersion returns the major and minor version of apparmor_parser. +func getAAParserVersion(apparmorParserPath string) (int, error) { + output, err := execAAParser(apparmorParserPath, "", "--version") + if err != nil { + return -1, errors.Wrap(err, "execute apparmor_parser") + } + return parseAAParserVersion(output) +} + +// parseAAParserVersion parses the given `apparmor_parser --version` output and +// returns the major and minor version number as an integer. +func parseAAParserVersion(output string) (int, error) { + // output is in the form of the following: + // AppArmor parser version 2.9.1 + // Copyright (C) 1999-2008 Novell Inc. + // Copyright 2009-2012 Canonical Ltd. + lines := strings.SplitN(output, "\n", 2) + words := strings.Split(lines[0], " ") + version := words[len(words)-1] + + // split by major minor version + v := strings.Split(version, ".") + if len(v) == 0 || len(v) > 3 { + return -1, errors.Errorf("parsing version failed for output: `%s`", output) + } + + // Default the versions to 0. + var majorVersion, minorVersion, patchLevel int + + majorVersion, err := strconv.Atoi(v[0]) + if err != nil { + return -1, errors.Wrap(err, "convert AppArmor major version") + } + + if len(v) > 1 { + minorVersion, err = strconv.Atoi(v[1]) + if err != nil { + return -1, errors.Wrap(err, "convert AppArmor minor version") + } + } + if len(v) > 2 { + patchLevel, err = strconv.Atoi(v[2]) + if err != nil { + return -1, errors.Wrap(err, "convert AppArmor patch version") + } + } + + // major*10^5 + minor*10^3 + patch*10^0 + numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel + return numericVersion, nil +} + +// CheckProfileAndLoadDefault checks if the specified profile is loaded and +// loads the DefaultLibpodProfile if the specified on is prefixed by +// DefaultLipodProfilePrefix. 
This allows always loading and applying the
+// latest default AppArmor profile. Note that AppArmor requires root. If it's
+// a default profile, the default profile name is returned, otherwise the
+// specified one.
+func CheckProfileAndLoadDefault(name string) (string, error) {
+	if name == "unconfined" {
+		return name, nil
+	}
+
+	// AppArmor is not supported in rootless mode as it requires root
+	// privileges. Return an error in case a specific profile is specified.
+	if unshare.IsRootless() {
+		if name != "" {
+			return "", errors.Wrapf(ErrApparmorRootless, "cannot load AppArmor profile %q", name)
+		} else {
+			logrus.Debug("Skipping loading default AppArmor profile (rootless mode)")
+			return "", nil
+		}
+	}
+
+	// Check if AppArmor is disabled and error out if a profile is to be set.
+	if !runcaa.IsEnabled() {
+		if name == "" {
+			return "", nil
+		} else {
+			return "", errors.Errorf("profile %q specified but AppArmor is disabled on the host", name)
+		}
+	}
+
+	if name == "" {
+		name = Profile
+	} else if !strings.HasPrefix(name, ProfilePrefix) {
+		// If the specified name is not a default one, verify it is loaded
+		// and return it.
+		isLoaded, err := IsLoaded(name)
+		if err != nil {
+			return "", errors.Wrapf(err, "verify if profile %s is loaded", name)
+		}
+		if !isLoaded {
+			return "", errors.Errorf("AppArmor profile %q specified but not loaded", name)
+		}
+		return name, nil
+	}
+
+	// To avoid expensive redundant loads on each invocation, check
+	// if it's loaded before installing it.
+	isLoaded, err := IsLoaded(name)
+	if err != nil {
+		return "", errors.Wrapf(err, "verify if profile %s is loaded", name)
+	}
+	if !isLoaded {
+		err = InstallDefault(name)
+		if err != nil {
+			return "", errors.Wrapf(err, "install profile %s", name)
+		}
+		logrus.Infof("Successfully loaded AppArmor profile %q", name)
+	} else {
+		logrus.Infof("AppArmor profile %q is already loaded", name)
+	}
+
+	return name, nil
+}
diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go
new file mode 100644
index 00000000000..667fa9f2655
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go
@@ -0,0 +1,50 @@
+//go:build linux && apparmor
+// +build linux,apparmor
+
+package apparmor
+
+const defaultProfileTemplate = `
+{{range $value := .Imports}}
+{{$value}}
+{{end}}
+
+profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
+{{range $value := .InnerImports}}
+  {{$value}}
+{{end}}
+
+  network,
+  capability,
+  file,
+  umount,
+
+{{if ge .Version 208096}}
+  # Allow signals from privileged profiles and from within the same profile
+  signal (receive) peer=unconfined,
+  signal (send,receive) peer={{.Name}},
+{{end}}
+
+  deny @{PROC}/* w,   # deny write for all files directly in /proc (not in a subdir)
+  # deny write to files not in /proc/<number>/** or /proc/sys/**
+  deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w,
+  deny @{PROC}/sys/[^k]** w,  # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
+  deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w,  # deny everything except shm* in /proc/sys/kernel/
+  deny @{PROC}/sysrq-trigger rwklx,
+  deny @{PROC}/kcore rwklx,
+
+  deny mount,
+
+  deny /sys/[^f]*/** wklx,
+  deny /sys/f[^s]*/** wklx,
+  deny /sys/fs/[^c]*/** wklx,
+  deny /sys/fs/c[^g]*/** wklx,
+  deny /sys/fs/cg[^r]*/** wklx,
+  deny /sys/firmware/** rwklx,
+  deny /sys/kernel/security/** rwklx,
+
+{{if ge .Version 208095}}
+  # suppress ptrace denials when using 'ps' inside a container
+  ptrace (trace,read) peer={{.Name}},
+{{end}}
+}
+`
diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go
new file mode 100644
index 00000000000..dacfc2f48c2
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go
@@ -0,0 +1,32 @@
+//go:build !linux || !apparmor
+// +build !linux !apparmor
+
+package apparmor
+
+// IsEnabled dummy.
+func IsEnabled() bool {
+	return false
+}
+
+// InstallDefault dummy.
+func InstallDefault(name string) error {
+	return ErrApparmorUnsupported
+}
+
+// IsLoaded dummy.
+func IsLoaded(name string) (bool, error) {
+	return false, ErrApparmorUnsupported
+}
+
+// CheckProfileAndLoadDefault dummy.
+func CheckProfileAndLoadDefault(name string) (string, error) {
+	if name == "" {
+		return "", nil
+	}
+	return "", ErrApparmorUnsupported
+}
+
+// DefaultContent dummy.
+func DefaultContent(name string) ([]byte, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go b/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go
new file mode 100644
index 00000000000..778f4e3a20a
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/apparmor/internal/supported/supported.go
@@ -0,0 +1,113 @@
+package supported
+
+import (
+	"os"
+	"os/exec"
+	"path/filepath"
+	"sync"
+
+	"github.com/containers/storage/pkg/unshare"
+	runcaa "github.com/opencontainers/runc/libcontainer/apparmor"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate
+
+// ApparmorVerifier is the global struct for verifying if AppArmor is available
+// on the system.
+type ApparmorVerifier struct {
+	impl             verifierImpl
+	parserBinaryPath string
+}
+
+var (
+	singleton *ApparmorVerifier
+	once      sync.Once
+)
+
+// NewAppArmorVerifier can be used to retrieve a new ApparmorVerifier instance.
+func NewAppArmorVerifier() *ApparmorVerifier {
+	once.Do(func() {
+		singleton = &ApparmorVerifier{impl: &defaultVerifier{}}
+	})
+	return singleton
+}
+
+// IsSupported returns nil if AppArmor is supported by the host system.
+// The method will error if:
+// - the process runs in rootless mode
+// - AppArmor is disabled by the host system
+// - the `apparmor_parser` binary is not discoverable
+func (a *ApparmorVerifier) IsSupported() error {
+	if a.impl.UnshareIsRootless() {
+		return errors.New("AppArmor is not supported on rootless containers")
+	}
+	if !a.impl.RuncIsEnabled() {
+		return errors.New("AppArmor not supported by the host system")
+	}
+
+	_, err := a.FindAppArmorParserBinary()
+	return err
+}
+
+// FindAppArmorParserBinary returns the `apparmor_parser` binary either from
+// `/sbin` or from `$PATH`. It returns an error if the binary could not be
+// found.
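A sketch of the expected calling sequence (note that this package is internal to containers/common, so only code within that module can import it; the wrapper name below is hypothetical). The lookup itself is implemented next.

```go
package apparmor // hypothetical caller inside containers/common

import (
	"github.com/containers/common/pkg/apparmor/internal/supported"
)

// ensureAppArmor returns the apparmor_parser path after verifying that
// AppArmor can actually be used on this host.
func ensureAppArmor() (string, error) {
	v := supported.NewAppArmorVerifier()
	// IsSupported combines the rootless, kernel, and binary checks.
	if err := v.IsSupported(); err != nil {
		return "", err
	}
	// The result is memoized in parserBinaryPath across calls.
	return v.FindAppArmorParserBinary()
}
```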
+func (a *ApparmorVerifier) FindAppArmorParserBinary() (string, error) { + // Use the memoized path if available + if a.parserBinaryPath != "" { + logrus.Debugf("Using %s binary", a.parserBinaryPath) + return a.parserBinaryPath, nil + } + + const ( + binary = "apparmor_parser" + sbin = "/sbin" + ) + + // `/sbin` is not always in `$PATH`, so we check it explicitly + sbinBinaryPath := filepath.Join(sbin, binary) + if _, err := a.impl.OsStat(sbinBinaryPath); err == nil { + logrus.Debugf("Found %s binary in %s", binary, sbinBinaryPath) + a.parserBinaryPath = sbinBinaryPath + return sbinBinaryPath, nil + } + + // Fallback to checking $PATH + if path, err := a.impl.ExecLookPath(binary); err == nil { + logrus.Debugf("Found %s binary in %s", binary, path) + a.parserBinaryPath = path + return path, nil + } + + return "", errors.Errorf( + "%s binary neither found in %s nor $PATH", binary, sbin, + ) +} + +//counterfeiter:generate . verifierImpl +type verifierImpl interface { + UnshareIsRootless() bool + RuncIsEnabled() bool + OsStat(name string) (os.FileInfo, error) + ExecLookPath(file string) (string, error) +} + +type defaultVerifier struct{} + +func (d *defaultVerifier) UnshareIsRootless() bool { + return unshare.IsRootless() +} + +func (d *defaultVerifier) RuncIsEnabled() bool { + return runcaa.IsEnabled() +} + +func (d *defaultVerifier) OsStat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (d *defaultVerifier) ExecLookPath(file string) (string, error) { + return exec.LookPath(file) +} diff --git a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go new file mode 100644 index 00000000000..10c5dd7c4c6 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go @@ -0,0 +1,209 @@ +package capabilities + +// Copyright 2013-2018 Docker, Inc. + +// NOTE: this package has been copied from github.com/docker/docker but been +// changed significantly to fit the needs of libpod. + +import ( + "sort" + "strings" + "sync" + + "github.com/pkg/errors" + "github.com/syndtr/gocapability/capability" +) + +var ( + // Used internally and populated during init(). + capabilityList []string + + // Used internally and populated during init(). + capsList []capability.Cap + + // ErrUnknownCapability is thrown when an unknown capability is processed. + ErrUnknownCapability = errors.New("unknown capability") + + // ContainerImageLabels - label can indicate the required + // capabilities required by containers to run the container image. + ContainerImageLabels = []string{"io.containers.capabilities"} +) + +// All is a special value used to add/drop all known capabilities. +// Useful on the CLI for `--cap-add=all` etc. 
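Since the merging helpers defined later in this file are the main entry point for callers, a small worked example may help (the capability sets are invented); the `All` constant follows the sketch.

```go
package main

import (
	"fmt"

	"github.com/containers/common/pkg/capabilities"
)

func main() {
	base := []string{"CAP_CHOWN", "CAP_NET_BIND_SERVICE"}
	// NormalizeCapabilities (called internally) upper-cases and adds the
	// CAP_ prefix, so "net_admin" becomes "CAP_NET_ADMIN".
	caps, err := capabilities.MergeCapabilities(base, []string{"net_admin"}, []string{"CAP_CHOWN"})
	if err != nil {
		panic(err)
	}
	fmt.Println(caps) // [CAP_NET_ADMIN CAP_NET_BIND_SERVICE]
}
```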
+const All = "ALL" + +func getCapName(c capability.Cap) string { + return "CAP_" + strings.ToUpper(c.String()) +} + +func init() { + last := capability.CAP_LAST_CAP + // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap + if last == capability.Cap(63) { + last = capability.CAP_BLOCK_SUSPEND + } + for _, cap := range capability.List() { + if cap > last { + continue + } + capsList = append(capsList, cap) + capabilityList = append(capabilityList, getCapName(cap)) + sort.Strings(capabilityList) + } +} + +// stringInSlice determines if a string is in a string slice, returns bool +func stringInSlice(s string, sl []string) bool { + for _, i := range sl { + if i == s { + return true + } + } + return false +} + +var ( + boundingSetOnce sync.Once + boundingSetRet []string + boundingSetErr error +) + +// BoundingSet returns the capabilities in the current bounding set +func BoundingSet() ([]string, error) { + boundingSetOnce.Do(func() { + currentCaps, err := capability.NewPid2(0) + if err != nil { + boundingSetErr = err + return + } + err = currentCaps.Load() + if err != nil { + boundingSetErr = err + return + } + var r []string + for _, c := range capsList { + if !currentCaps.Get(capability.BOUNDING, c) { + continue + } + r = append(r, getCapName(c)) + } + boundingSetRet = r + sort.Strings(boundingSetRet) + boundingSetErr = err + }) + return boundingSetRet, boundingSetErr +} + +// AllCapabilities returns all known capabilities. +func AllCapabilities() []string { + return capabilityList +} + +// NormalizeCapabilities normalizes caps by adding a "CAP_" prefix (if not yet +// present). +func NormalizeCapabilities(caps []string) ([]string, error) { + normalized := make([]string, len(caps)) + for i, c := range caps { + c = strings.ToUpper(c) + if c == All { + normalized = append(normalized, c) + continue + } + if !strings.HasPrefix(c, "CAP_") { + c = "CAP_" + c + } + if !stringInSlice(c, capabilityList) { + return nil, errors.Wrapf(ErrUnknownCapability, "%q", c) + } + normalized[i] = c + } + sort.Strings(normalized) + return normalized, nil +} + +// ValidateCapabilities validates if caps only contains valid capabilities. +func ValidateCapabilities(caps []string) error { + for _, c := range caps { + if !stringInSlice(c, capabilityList) { + return errors.Wrapf(ErrUnknownCapability, "%q", c) + } + } + return nil +} + +// MergeCapabilities computes a set of capabilities by adding capabilities +// to or dropping them from base. 
+// +// Note that: +// "ALL" in capAdd adds returns known capabilities +// "All" in capDrop returns only the capabilities specified in capAdd +func MergeCapabilities(base, adds, drops []string) ([]string, error) { + var caps []string + + // Normalize the base capabilities + base, err := NormalizeCapabilities(base) + if err != nil { + return nil, err + } + if len(adds) == 0 && len(drops) == 0 { + // Nothing to tweak; we're done + return base, nil + } + capDrop, err := NormalizeCapabilities(drops) + if err != nil { + return nil, err + } + capAdd, err := NormalizeCapabilities(adds) + if err != nil { + return nil, err + } + + if stringInSlice(All, capDrop) { + if stringInSlice(All, capAdd) { + return nil, errors.New("adding all caps and removing all caps not allowed") + } + // "Drop" all capabilities; return what's in capAdd instead + sort.Strings(capAdd) + return capAdd, nil + } + + if stringInSlice(All, capAdd) { + base, err = BoundingSet() + if err != nil { + return nil, err + } + capAdd = []string{} + } else { + for _, add := range capAdd { + if stringInSlice(add, capDrop) { + return nil, errors.Errorf("capability %q cannot be dropped and added", add) + } + } + } + + for _, drop := range capDrop { + if stringInSlice(drop, capAdd) { + return nil, errors.Errorf("capability %q cannot be dropped and added", drop) + } + } + + // Drop any capabilities in capDrop that are in base + for _, cap := range base { + if stringInSlice(cap, capDrop) { + continue + } + caps = append(caps, cap) + } + + // Add any capabilities in capAdd that are not in base + for _, cap := range capAdd { + if stringInSlice(cap, base) { + continue + } + caps = append(caps, cap) + } + sort.Strings(caps) + return caps, nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/blkio.go b/vendor/github.com/containers/common/pkg/cgroups/blkio.go new file mode 100644 index 00000000000..0fb61c75776 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/blkio.go @@ -0,0 +1,148 @@ +package cgroups + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +type blkioHandler struct{} + +func getBlkioHandler() *blkioHandler { + return &blkioHandler{} +} + +// Apply set the specified constraints +func (c *blkioHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { + if res.BlockIO == nil { + return nil + } + return fmt.Errorf("blkio apply function not implemented yet") +} + +// Create the cgroup +func (c *blkioHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + return false, nil + } + return ctr.createCgroupDirectory(Blkio) +} + +// Destroy the cgroup +func (c *blkioHandler) Destroy(ctr *CgroupControl) error { + return rmDirRecursively(ctr.getCgroupv1Path(Blkio)) +} + +// Stat fills a metrics structure with usage stats for the controller +func (c *blkioHandler) Stat(ctr *CgroupControl, m *Metrics) error { + var ioServiceBytesRecursive []BlkIOEntry + + if ctr.cgroup2 { + // more details on the io.stat file format:X https://facebookmicrosites.github.io/cgroup2/docs/io-controller.html + values, err := readCgroup2MapFile(ctr, "io.stat") + if err != nil { + return err + } + for k, v := range values { + d := strings.Split(k, ":") + if len(d) != 2 { + continue + } + minor, err := strconv.ParseUint(d[0], 10, 0) + if err != nil { + return err + } + major, err := strconv.ParseUint(d[1], 10, 0) + if err != nil { + return err + } + + for _, item := range v { + d := strings.Split(item, 
"=") + if len(d) != 2 { + continue + } + op := d[0] + + // Accommodate the cgroup v1 naming + switch op { + case "rbytes": + op = "read" + case "wbytes": + op = "write" + } + + value, err := strconv.ParseUint(d[1], 10, 0) + if err != nil { + return err + } + + entry := BlkIOEntry{ + Op: op, + Major: major, + Minor: minor, + Value: value, + } + ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry) + } + } + } else { + BlkioRoot := ctr.getCgroupv1Path(Blkio) + + p := filepath.Join(BlkioRoot, "blkio.throttle.io_service_bytes_recursive") + f, err := os.Open(p) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrapf(err, "open %s", p) + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(line) + if len(parts) < 3 { + continue + } + d := strings.Split(parts[0], ":") + if len(d) != 2 { + continue + } + minor, err := strconv.ParseUint(d[0], 10, 0) + if err != nil { + return err + } + major, err := strconv.ParseUint(d[1], 10, 0) + if err != nil { + return err + } + + op := parts[1] + + value, err := strconv.ParseUint(parts[2], 10, 0) + if err != nil { + return err + } + entry := BlkIOEntry{ + Op: op, + Major: major, + Minor: minor, + Value: value, + } + ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry) + } + if err := scanner.Err(); err != nil { + return errors.Wrapf(err, "parse %s", p) + } + } + m.Blkio = BlkioMetrics{IoServiceBytesRecursive: ioServiceBytesRecursive} + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go new file mode 100644 index 00000000000..57997d65207 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go @@ -0,0 +1,671 @@ +package cgroups + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io/ioutil" + "math" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/containers/storage/pkg/unshare" + systemdDbus "github.com/coreos/go-systemd/v22/dbus" + "github.com/godbus/dbus/v5" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + // ErrCgroupDeleted means the cgroup was deleted + ErrCgroupDeleted = errors.New("cgroup deleted") + // ErrCgroupV1Rootless means the cgroup v1 were attempted to be used in rootless environment + ErrCgroupV1Rootless = errors.New("no support for CGroups V1 in rootless environments") + ErrStatCgroup = errors.New("no cgroup available for gathering user statistics") +) + +// CgroupControl controls a cgroup hierarchy +type CgroupControl struct { + cgroup2 bool + path string + systemd bool + // List of additional cgroup subsystems joined that + // do not have a custom handler. 
+ additionalControllers []controller +} + +// CPUUsage keeps stats for the CPU usage (unit: nanoseconds) +type CPUUsage struct { + Kernel uint64 + Total uint64 + PerCPU []uint64 +} + +// MemoryUsage keeps stats for the memory usage +type MemoryUsage struct { + Usage uint64 + Limit uint64 +} + +// CPUMetrics keeps stats for the CPU usage +type CPUMetrics struct { + Usage CPUUsage +} + +// BlkIOEntry describes an entry in the blkio stats +type BlkIOEntry struct { + Op string + Major uint64 + Minor uint64 + Value uint64 +} + +// BlkioMetrics keeps usage stats for the blkio cgroup controller +type BlkioMetrics struct { + IoServiceBytesRecursive []BlkIOEntry +} + +// MemoryMetrics keeps usage stats for the memory cgroup controller +type MemoryMetrics struct { + Usage MemoryUsage +} + +// PidsMetrics keeps usage stats for the pids cgroup controller +type PidsMetrics struct { + Current uint64 +} + +// Metrics keeps usage stats for the cgroup controllers +type Metrics struct { + CPU CPUMetrics + Blkio BlkioMetrics + Memory MemoryMetrics + Pids PidsMetrics +} + +type controller struct { + name string + symlink bool +} + +type controllerHandler interface { + Create(*CgroupControl) (bool, error) + Apply(*CgroupControl, *spec.LinuxResources) error + Destroy(*CgroupControl) error + Stat(*CgroupControl, *Metrics) error +} + +const ( + cgroupRoot = "/sys/fs/cgroup" + // CPU is the cpu controller + CPU = "cpu" + // CPUAcct is the cpuacct controller + CPUAcct = "cpuacct" + // CPUset is the cpuset controller + CPUset = "cpuset" + // Memory is the memory controller + Memory = "memory" + // Pids is the pids controller + Pids = "pids" + // Blkio is the blkio controller + Blkio = "blkio" +) + +var handlers map[string]controllerHandler + +func init() { + handlers = make(map[string]controllerHandler) + handlers[CPU] = getCPUHandler() + handlers[CPUset] = getCpusetHandler() + handlers[Memory] = getMemoryHandler() + handlers[Pids] = getPidsHandler() + handlers[Blkio] = getBlkioHandler() +} + +// getAvailableControllers get the available controllers +func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]controller, error) { + if cgroup2 { + controllers := []controller{} + controllersFile := cgroupRoot + "/cgroup.controllers" + // rootless cgroupv2: check available controllers for current user, systemd or servicescope will inherit + if unshare.IsRootless() { + userSlice, err := getCgroupPathForCurrentProcess() + if err != nil { + return controllers, err + } + // userSlice already contains '/' so not adding here + basePath := cgroupRoot + userSlice + controllersFile = fmt.Sprintf("%s/cgroup.controllers", basePath) + } + controllersFileBytes, err := ioutil.ReadFile(controllersFile) + if err != nil { + return nil, errors.Wrapf(err, "failed while reading controllers for cgroup v2 from %q", controllersFile) + } + for _, controllerName := range strings.Fields(string(controllersFileBytes)) { + c := controller{ + name: controllerName, + symlink: false, + } + controllers = append(controllers, c) + } + return controllers, nil + } + + subsystems, _ := cgroupV1GetAllSubsystems() + controllers := []controller{} + // cgroupv1 and rootless: No subsystem is available: delegation is unsafe. 
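+	// For illustration, cgroupV1GetAllSubsystems (defined below) parses
+	// /proc/cgroups, whose rows look like:
+	//
+	//	#subsys_name	hierarchy	num_cgroups	enabled
+	//	cpu	3	64	1
+	//	memory	5	104	1
+	//
+	// and only subsystems whose "enabled" column is non-zero are returned.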
+ if unshare.IsRootless() { + return controllers, nil + } + + for _, name := range subsystems { + if _, found := exclude[name]; found { + continue + } + fileInfo, err := os.Stat(cgroupRoot + "/" + name) + if err != nil { + continue + } + c := controller{ + name: name, + symlink: !fileInfo.IsDir(), + } + controllers = append(controllers, c) + } + + return controllers, nil +} + +// GetAvailableControllers get string:bool map of all the available controllers +func GetAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]string, error) { + availableControllers, err := getAvailableControllers(exclude, cgroup2) + if err != nil { + return nil, err + } + controllerList := []string{} + for _, controller := range availableControllers { + controllerList = append(controllerList, controller.name) + } + + return controllerList, nil +} + +func cgroupV1GetAllSubsystems() ([]string, error) { + f, err := os.Open("/proc/cgroups") + if err != nil { + return nil, err + } + defer f.Close() + + subsystems := []string{} + + s := bufio.NewScanner(f) + for s.Scan() { + text := s.Text() + if text[0] != '#' { + parts := strings.Fields(text) + if len(parts) >= 4 && parts[3] != "0" { + subsystems = append(subsystems, parts[0]) + } + } + } + if err := s.Err(); err != nil { + return nil, err + } + return subsystems, nil +} + +func getCgroupPathForCurrentProcess() (string, error) { + path := fmt.Sprintf("/proc/%d/cgroup", os.Getpid()) + f, err := os.Open(path) + if err != nil { + return "", err + } + defer f.Close() + + cgroupPath := "" + s := bufio.NewScanner(f) + for s.Scan() { + text := s.Text() + procEntries := strings.SplitN(text, "::", 2) + // set process cgroupPath only if entry is valid + if len(procEntries) > 1 { + cgroupPath = procEntries[1] + } + } + if err := s.Err(); err != nil { + return cgroupPath, err + } + return cgroupPath, nil +} + +// getCgroupv1Path is a helper function to get the cgroup v1 path +func (c *CgroupControl) getCgroupv1Path(name string) string { + return filepath.Join(cgroupRoot, name, c.path) +} + +// createCgroupv2Path creates the cgroupv2 path and enables all the available controllers +func createCgroupv2Path(path string) (deferredError error) { + if !strings.HasPrefix(path, cgroupRoot+"/") { + return fmt.Errorf("invalid cgroup path %s", path) + } + content, err := ioutil.ReadFile(cgroupRoot + "/cgroup.controllers") + if err != nil { + return err + } + ctrs := bytes.Fields(content) + res := append([]byte("+"), bytes.Join(ctrs, []byte(" +"))...) + + current := "/sys/fs" + elements := strings.Split(path, "/") + for i, e := range elements[3:] { + current = filepath.Join(current, e) + if i > 0 { + if err := os.Mkdir(current, 0o755); err != nil { + if !os.IsExist(err) { + return err + } + } else { + // If the directory was created, be sure it is not left around on errors. + defer func() { + if deferredError != nil { + os.Remove(current) + } + }() + } + } + // We enable the controllers for all the path components except the last one. It is not allowed to add + // PIDs if there are already enabled controllers. 
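+		// For example, if cgroup.controllers lists "cpu io memory", res
+		// (built above) is the single payload "+cpu +io +memory"; writing it
+		// to cgroup.subtree_control enables those controllers for the
+		// children of each intermediate directory.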
+ if i < len(elements[3:])-1 { + if err := ioutil.WriteFile(filepath.Join(current, "cgroup.subtree_control"), res, 0o755); err != nil { + return err + } + } + } + return nil +} + +// initialize initializes the specified hierarchy +func (c *CgroupControl) initialize() (err error) { + createdSoFar := map[string]controllerHandler{} + defer func() { + if err != nil { + for name, ctr := range createdSoFar { + if err := ctr.Destroy(c); err != nil { + logrus.Warningf("error cleaning up controller %s for %s", name, c.path) + } + } + } + }() + if c.cgroup2 { + if err := createCgroupv2Path(filepath.Join(cgroupRoot, c.path)); err != nil { + return errors.Wrapf(err, "error creating cgroup path %s", c.path) + } + } + for name, handler := range handlers { + created, err := handler.Create(c) + if err != nil { + return err + } + if created { + createdSoFar[name] = handler + } + } + + if !c.cgroup2 { + // We won't need to do this for cgroup v2 + for _, ctr := range c.additionalControllers { + if ctr.symlink { + continue + } + path := c.getCgroupv1Path(ctr.name) + if err := os.MkdirAll(path, 0o755); err != nil { + return errors.Wrapf(err, "error creating cgroup path for %s", ctr.name) + } + } + } + + return nil +} + +func (c *CgroupControl) createCgroupDirectory(controller string) (bool, error) { + cPath := c.getCgroupv1Path(controller) + _, err := os.Stat(cPath) + if err == nil { + return false, nil + } + + if !os.IsNotExist(err) { + return false, err + } + + if err := os.MkdirAll(cPath, 0o755); err != nil { + return false, errors.Wrapf(err, "error creating cgroup for %s", controller) + } + return true, nil +} + +func readFileAsUint64(path string) (uint64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + v := cleanString(string(data)) + if v == "max" { + return math.MaxUint64, nil + } + ret, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return ret, errors.Wrapf(err, "parse %s from %s", v, path) + } + return ret, nil +} + +func readFileByKeyAsUint64(path, key string) (uint64, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + for _, line := range strings.Split(string(content), "\n") { + fields := strings.SplitN(line, " ", 2) + if fields[0] == key { + v := cleanString(string(fields[1])) + if v == "max" { + return math.MaxUint64, nil + } + ret, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return ret, errors.Wrapf(err, "parse %s from %s", v, path) + } + return ret, nil + } + } + + return 0, fmt.Errorf("no key named %s from %s", key, path) +} + +// New creates a new cgroup control +func New(path string, resources *spec.LinuxResources) (*CgroupControl, error) { + cgroup2, err := IsCgroup2UnifiedMode() + if err != nil { + return nil, err + } + control := &CgroupControl{ + cgroup2: cgroup2, + path: path, + } + + if !cgroup2 { + controllers, err := getAvailableControllers(handlers, false) + if err != nil { + return nil, err + } + control.additionalControllers = controllers + } + + if err := control.initialize(); err != nil { + return nil, err + } + + return control, nil +} + +// NewSystemd creates a new cgroup control +func NewSystemd(path string) (*CgroupControl, error) { + cgroup2, err := IsCgroup2UnifiedMode() + if err != nil { + return nil, err + } + control := &CgroupControl{ + cgroup2: cgroup2, + path: path, + systemd: true, + } + return control, nil +} + +// Load loads an existing cgroup control +func Load(path string) (*CgroupControl, error) { + cgroup2, err := IsCgroup2UnifiedMode() + if err != nil { + return 
nil, err + } + control := &CgroupControl{ + cgroup2: cgroup2, + path: path, + systemd: false, + } + if !cgroup2 { + controllers, err := getAvailableControllers(handlers, false) + if err != nil { + return nil, err + } + control.additionalControllers = controllers + } + if !cgroup2 { + oneExists := false + // check that the cgroup exists at least under one controller + for name := range handlers { + p := control.getCgroupv1Path(name) + if _, err := os.Stat(p); err == nil { + oneExists = true + break + } + } + + // if there is no controller at all, raise an error + if !oneExists { + if unshare.IsRootless() { + return nil, ErrCgroupV1Rootless + } + // compatible with the error code + // used by containerd/cgroups + return nil, ErrCgroupDeleted + } + } + return control, nil +} + +// CreateSystemdUnit creates the systemd cgroup +func (c *CgroupControl) CreateSystemdUnit(path string) error { + if !c.systemd { + return fmt.Errorf("the cgroup controller is not using systemd") + } + + conn, err := systemdDbus.NewWithContext(context.TODO()) + if err != nil { + return err + } + defer conn.Close() + + return systemdCreate(path, conn) +} + +// GetUserConnection returns an user connection to D-BUS +func GetUserConnection(uid int) (*systemdDbus.Conn, error) { + return systemdDbus.NewConnection(func() (*dbus.Conn, error) { + return dbusAuthConnection(uid, dbus.SessionBusPrivate) + }) +} + +// CreateSystemdUserUnit creates the systemd cgroup for the specified user +func (c *CgroupControl) CreateSystemdUserUnit(path string, uid int) error { + if !c.systemd { + return fmt.Errorf("the cgroup controller is not using systemd") + } + + conn, err := GetUserConnection(uid) + if err != nil { + return err + } + defer conn.Close() + + return systemdCreate(path, conn) +} + +func dbusAuthConnection(uid int, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus() + if err != nil { + return nil, err + } + + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(uid))} + + err = conn.Auth(methods) + if err != nil { + conn.Close() + return nil, err + } + if err := conn.Hello(); err != nil { + return nil, err + } + + return conn, nil +} + +// Delete cleans a cgroup +func (c *CgroupControl) Delete() error { + return c.DeleteByPath(c.path) +} + +// DeleteByPathConn deletes the specified cgroup path using the specified +// dbus connection if needed. 
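+// A nil conn is valid as long as the control is not systemd-managed:
+// DeleteByPath below passes nil in that case, and the connection is only
+// used on the systemd path.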
+func (c *CgroupControl) DeleteByPathConn(path string, conn *systemdDbus.Conn) error { + if c.systemd { + return systemdDestroyConn(path, conn) + } + if c.cgroup2 { + return rmDirRecursively(filepath.Join(cgroupRoot, c.path)) + } + var lastError error + for _, h := range handlers { + if err := h.Destroy(c); err != nil { + lastError = err + } + } + + for _, ctr := range c.additionalControllers { + if ctr.symlink { + continue + } + p := c.getCgroupv1Path(ctr.name) + if err := rmDirRecursively(p); err != nil { + lastError = errors.Wrapf(err, "remove %s", p) + } + } + return lastError +} + +// DeleteByPath deletes the specified cgroup path +func (c *CgroupControl) DeleteByPath(path string) error { + if c.systemd { + conn, err := systemdDbus.NewWithContext(context.TODO()) + if err != nil { + return err + } + defer conn.Close() + return c.DeleteByPathConn(path, conn) + } + return c.DeleteByPathConn(path, nil) +} + +// Update updates the cgroups +func (c *CgroupControl) Update(resources *spec.LinuxResources) error { + for _, h := range handlers { + if err := h.Apply(c, resources); err != nil { + return err + } + } + return nil +} + +// AddPid moves the specified pid to the cgroup +func (c *CgroupControl) AddPid(pid int) error { + pidString := []byte(fmt.Sprintf("%d\n", pid)) + + if c.cgroup2 { + p := filepath.Join(cgroupRoot, c.path, "cgroup.procs") + if err := ioutil.WriteFile(p, pidString, 0o644); err != nil { + return errors.Wrapf(err, "write %s", p) + } + return nil + } + + names := make([]string, 0, len(handlers)) + for n := range handlers { + names = append(names, n) + } + + for _, c := range c.additionalControllers { + if !c.symlink { + names = append(names, c.name) + } + } + + for _, n := range names { + // If we aren't using cgroup2, we won't write correctly to unified hierarchy + if !c.cgroup2 && n == "unified" { + continue + } + p := filepath.Join(c.getCgroupv1Path(n), "tasks") + if err := ioutil.WriteFile(p, pidString, 0o644); err != nil { + return errors.Wrapf(err, "write %s", p) + } + } + return nil +} + +// Stat returns usage statistics for the cgroup +func (c *CgroupControl) Stat() (*Metrics, error) { + m := Metrics{} + found := false + for _, h := range handlers { + if err := h.Stat(c, &m); err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return nil, err + } + logrus.Warningf("Failed to retrieve cgroup stats: %v", err) + continue + } + found = true + } + if !found { + return nil, ErrStatCgroup + } + return &m, nil +} + +func readCgroup2MapPath(path string) (map[string][]string, error) { + ret := map[string][]string{} + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return ret, nil + } + return nil, errors.Wrapf(err, "open file %s", path) + } + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(line) + if len(parts) < 2 { + continue + } + ret[parts[0]] = parts[1:] + } + if err := scanner.Err(); err != nil { + return nil, errors.Wrapf(err, "parsing file %s", path) + } + return ret, nil +} + +func readCgroup2MapFile(ctr *CgroupControl, name string) (map[string][]string, error) { + p := filepath.Join(cgroupRoot, ctr.path, name) + + return readCgroup2MapPath(p) +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go new file mode 100644 index 00000000000..edb28ad1807 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go @@ -0,0 +1,130 @@ +//go:build 
linux
+// +build linux
+
+package cgroups
+
+import (
+	"bufio"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+var (
+	isUnifiedOnce sync.Once
+	isUnified     bool
+	isUnifiedErr  error
+)
+
+// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode.
+func IsCgroup2UnifiedMode() (bool, error) {
+	isUnifiedOnce.Do(func() {
+		var st syscall.Statfs_t
+		if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil {
+			isUnified, isUnifiedErr = false, err
+		} else {
+			isUnified, isUnifiedErr = st.Type == unix.CGROUP2_SUPER_MAGIC, nil
+		}
+	})
+	return isUnified, isUnifiedErr
+}
+
+// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the
+// current cgroup.
+func UserOwnsCurrentSystemdCgroup() (bool, error) {
+	uid := os.Geteuid()
+
+	cgroup2, err := IsCgroup2UnifiedMode()
+	if err != nil {
+		return false, err
+	}
+
+	f, err := os.Open("/proc/self/cgroup")
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.SplitN(line, ":", 3)
+
+		if len(parts) < 3 {
+			continue
+		}
+
+		var cgroupPath string
+
+		if cgroup2 {
+			cgroupPath = filepath.Join(cgroupRoot, parts[2])
+		} else {
+			if parts[1] != "name=systemd" {
+				continue
+			}
+			cgroupPath = filepath.Join(cgroupRoot, "systemd", parts[2])
+		}
+
+		st, err := os.Stat(cgroupPath)
+		if err != nil {
+			return false, err
+		}
+		s := st.Sys()
+		if s == nil {
+			return false, fmt.Errorf("error stat cgroup path %s", cgroupPath)
+		}
+
+		if int(s.(*syscall.Stat_t).Uid) != uid {
+			return false, nil
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return false, errors.Wrapf(err, "parsing file /proc/self/cgroup")
+	}
+	return true, nil
+}
+
+// rmDirRecursively deletes a cgroup directory recursively.
+// It differs from os.RemoveAll as it doesn't attempt to unlink files.
+// On cgroupfs we are allowed only to rmdir empty directories.
+func rmDirRecursively(path string) error {
+	if err := os.Remove(path); err == nil || os.IsNotExist(err) {
+		return nil
+	}
+	entries, err := ioutil.ReadDir(path)
+	if err != nil {
+		return err
+	}
+	for _, i := range entries {
+		if i.IsDir() {
+			if err := rmDirRecursively(filepath.Join(path, i.Name())); err != nil {
+				return err
+			}
+		}
+	}
+
+	attempts := 0
+	for {
+		err := os.Remove(path)
+		if err == nil || os.IsNotExist(err) {
+			return nil
+		}
+		if errors.Is(err, unix.EBUSY) {
+			// attempt up to 5 seconds if the cgroup is busy
+			if attempts < 500 {
+				time.Sleep(time.Millisecond * 10)
+				attempts++
+				continue
+			}
+		}
+		return errors.Wrapf(err, "remove %s", path)
+	}
+}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go
new file mode 100644
index 00000000000..b3dcb2d33da
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go
@@ -0,0 +1,23 @@
+//go:build !linux
+// +build !linux
+
+package cgroups
+
+import (
+	"os"
+)
+
+// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode.
+func IsCgroup2UnifiedMode() (bool, error) {
+	return false, nil
+}
+
+// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the
+// current cgroup.
+func UserOwnsCurrentSystemdCgroup() (bool, error) { + return false, nil +} + +func rmDirRecursively(path string) error { + return os.RemoveAll(path) +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpu.go b/vendor/github.com/containers/common/pkg/cgroups/cpu.go new file mode 100644 index 00000000000..c9e94f26977 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/cpu.go @@ -0,0 +1,159 @@ +package cgroups + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +type cpuHandler struct{} + +func getCPUHandler() *cpuHandler { + return &cpuHandler{} +} + +func cleanString(s string) string { + return strings.Trim(s, "\n") +} + +func readAcct(ctr *CgroupControl, name string) (uint64, error) { + p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name) + return readFileAsUint64(p) +} + +func readAcctList(ctr *CgroupControl, name string) ([]uint64, error) { + p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name) + data, err := ioutil.ReadFile(p) + if err != nil { + return nil, errors.Wrapf(err, "reading %s", p) + } + r := []uint64{} + for _, s := range strings.Split(string(data), " ") { + s = cleanString(s) + if s == "" { + break + } + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "parsing %s", s) + } + r = append(r, v) + } + return r, nil +} + +// Apply set the specified constraints +func (c *cpuHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { + if res.CPU == nil { + return nil + } + return fmt.Errorf("cpu apply not implemented yet") +} + +// Create the cgroup +func (c *cpuHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + return false, nil + } + return ctr.createCgroupDirectory(CPU) +} + +// Destroy the cgroup +func (c *cpuHandler) Destroy(ctr *CgroupControl) error { + return rmDirRecursively(ctr.getCgroupv1Path(CPU)) +} + +// Stat fills a metrics structure with usage stats for the controller +func (c *cpuHandler) Stat(ctr *CgroupControl, m *Metrics) error { + var err error + usage := CPUUsage{} + if ctr.cgroup2 { + values, err := readCgroup2MapFile(ctr, "cpu.stat") + if err != nil { + return err + } + if val, found := values["usage_usec"]; found { + usage.Total, err = strconv.ParseUint(cleanString(val[0]), 10, 64) + if err != nil { + return err + } + usage.Kernel *= 1000 + } + if val, found := values["system_usec"]; found { + usage.Kernel, err = strconv.ParseUint(cleanString(val[0]), 10, 64) + if err != nil { + return err + } + usage.Total *= 1000 + } + // FIXME: How to read usage.PerCPU? 
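+		// Note on the scaling above: cpu.stat reports microseconds, and the
+		// two *= 1000 statements are meant to convert to nanoseconds. As
+		// vendored, the usage_usec branch scales Kernel (still zero at that
+		// point) and the system_usec branch scales Total, so Total ends up
+		// in nanoseconds while Kernel remains in microseconds.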
+ } else { + usage.Total, err = readAcct(ctr, "cpuacct.usage") + if err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return err + } + usage.Total = 0 + } + usage.Kernel, err = readAcct(ctr, "cpuacct.usage_sys") + if err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return err + } + usage.Kernel = 0 + } + usage.PerCPU, err = readAcctList(ctr, "cpuacct.usage_percpu") + if err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return err + } + usage.PerCPU = nil + } + } + m.CPU = CPUMetrics{Usage: usage} + return nil +} + +// GetSystemCPUUsage returns the system usage for all the cgroups +func GetSystemCPUUsage() (uint64, error) { + cgroupv2, err := IsCgroup2UnifiedMode() + if err != nil { + return 0, err + } + if !cgroupv2 { + p := filepath.Join(cgroupRoot, CPUAcct, "cpuacct.usage") + return readFileAsUint64(p) + } + + files, err := ioutil.ReadDir(cgroupRoot) + if err != nil { + return 0, err + } + var total uint64 + for _, file := range files { + if !file.IsDir() { + continue + } + p := filepath.Join(cgroupRoot, file.Name(), "cpu.stat") + + values, err := readCgroup2MapPath(p) + if err != nil { + return 0, err + } + + if val, found := values["usage_usec"]; found { + v, err := strconv.ParseUint(cleanString(val[0]), 10, 64) + if err != nil { + return 0, err + } + total += v * 1000 + } + } + return total, nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpuset.go b/vendor/github.com/containers/common/pkg/cgroups/cpuset.go new file mode 100644 index 00000000000..2bfeb80db5a --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/cpuset.go @@ -0,0 +1,84 @@ +package cgroups + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strings" + + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +type cpusetHandler struct{} + +func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) { + if dir == cgroupRoot { + return nil, fmt.Errorf("could not find parent to initialize cpuset %s", file) + } + path := filepath.Join(dir, file) + parentPath := path + if cgroupv2 { + parentPath = fmt.Sprintf("%s.effective", parentPath) + } + data, err := ioutil.ReadFile(parentPath) + if err != nil { + return nil, errors.Wrapf(err, "open %s", path) + } + if strings.Trim(string(data), "\n") != "" { + return data, nil + } + data, err = cpusetCopyFileFromParent(filepath.Dir(dir), file, cgroupv2) + if err != nil { + return nil, err + } + if err := ioutil.WriteFile(path, data, 0o644); err != nil { + return nil, errors.Wrapf(err, "write %s", path) + } + return data, nil +} + +func cpusetCopyFromParent(path string, cgroupv2 bool) error { + for _, file := range []string{"cpuset.cpus", "cpuset.mems"} { + if _, err := cpusetCopyFileFromParent(path, file, cgroupv2); err != nil { + return err + } + } + return nil +} + +func getCpusetHandler() *cpusetHandler { + return &cpusetHandler{} +} + +// Apply set the specified constraints +func (c *cpusetHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { + if res.CPU == nil { + return nil + } + return fmt.Errorf("cpuset apply not implemented yet") +} + +// Create the cgroup +func (c *cpusetHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + path := filepath.Join(cgroupRoot, ctr.path) + return true, cpusetCopyFromParent(path, true) + } + + created, err := ctr.createCgroupDirectory(CPUset) + if !created || err != nil { + return created, err + } + return true, cpusetCopyFromParent(ctr.getCgroupv1Path(CPUset), false) +} + +// Destroy the cgroup +func (c 
*cpusetHandler) Destroy(ctr *CgroupControl) error { + return rmDirRecursively(ctr.getCgroupv1Path(CPUset)) +} + +// Stat fills a metrics structure with usage stats for the controller +func (c *cpusetHandler) Stat(ctr *CgroupControl, m *Metrics) error { + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/memory.go b/vendor/github.com/containers/common/pkg/cgroups/memory.go new file mode 100644 index 00000000000..10d65893c6c --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/memory.go @@ -0,0 +1,66 @@ +package cgroups + +import ( + "fmt" + "path/filepath" + + spec "github.com/opencontainers/runtime-spec/specs-go" +) + +type memHandler struct{} + +func getMemoryHandler() *memHandler { + return &memHandler{} +} + +// Apply set the specified constraints +func (c *memHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { + if res.Memory == nil { + return nil + } + return fmt.Errorf("memory apply not implemented yet") +} + +// Create the cgroup +func (c *memHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + return false, nil + } + return ctr.createCgroupDirectory(Memory) +} + +// Destroy the cgroup +func (c *memHandler) Destroy(ctr *CgroupControl) error { + return rmDirRecursively(ctr.getCgroupv1Path(Memory)) +} + +// Stat fills a metrics structure with usage stats for the controller +func (c *memHandler) Stat(ctr *CgroupControl, m *Metrics) error { + var err error + usage := MemoryUsage{} + + var memoryRoot string + var limitFilename string + + if ctr.cgroup2 { + memoryRoot = filepath.Join(cgroupRoot, ctr.path) + limitFilename = "memory.max" + if usage.Usage, err = readFileByKeyAsUint64(filepath.Join(memoryRoot, "memory.stat"), "anon"); err != nil { + return err + } + } else { + memoryRoot = ctr.getCgroupv1Path(Memory) + limitFilename = "memory.limit_in_bytes" + if usage.Usage, err = readFileAsUint64(filepath.Join(memoryRoot, "memory.usage_in_bytes")); err != nil { + return err + } + } + + usage.Limit, err = readFileAsUint64(filepath.Join(memoryRoot, limitFilename)) + if err != nil { + return err + } + + m.Memory = MemoryMetrics{Usage: usage} + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/pids.go b/vendor/github.com/containers/common/pkg/cgroups/pids.go new file mode 100644 index 00000000000..650120a560f --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/pids.go @@ -0,0 +1,68 @@ +package cgroups + +import ( + "fmt" + "io/ioutil" + "path/filepath" + + spec "github.com/opencontainers/runtime-spec/specs-go" +) + +type pidHandler struct{} + +func getPidsHandler() *pidHandler { + return &pidHandler{} +} + +// Apply set the specified constraints +func (c *pidHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { + if res.Pids == nil { + return nil + } + var PIDRoot string + + if ctr.cgroup2 { + PIDRoot = filepath.Join(cgroupRoot, ctr.path) + } else { + PIDRoot = ctr.getCgroupv1Path(Pids) + } + + p := filepath.Join(PIDRoot, "pids.max") + return ioutil.WriteFile(p, []byte(fmt.Sprintf("%d\n", res.Pids.Limit)), 0o644) +} + +// Create the cgroup +func (c *pidHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + return false, nil + } + return ctr.createCgroupDirectory(Pids) +} + +// Destroy the cgroup +func (c *pidHandler) Destroy(ctr *CgroupControl) error { + return rmDirRecursively(ctr.getCgroupv1Path(Pids)) +} + +// Stat fills a metrics structure with usage stats for the controller +func (c *pidHandler) Stat(ctr *CgroupControl, m *Metrics) error { + 
if ctr.path == "" { + // nothing we can do to retrieve the pids.current path + return nil + } + + var PIDRoot string + if ctr.cgroup2 { + PIDRoot = filepath.Join(cgroupRoot, ctr.path) + } else { + PIDRoot = ctr.getCgroupv1Path(Pids) + } + + current, err := readFileAsUint64(filepath.Join(PIDRoot, "pids.current")) + if err != nil { + return err + } + + m.Pids = PidsMetrics{Current: current} + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd.go b/vendor/github.com/containers/common/pkg/cgroups/systemd.go new file mode 100644 index 00000000000..92065a2d7cd --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/systemd.go @@ -0,0 +1,80 @@ +package cgroups + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + systemdDbus "github.com/coreos/go-systemd/v22/dbus" + "github.com/godbus/dbus/v5" +) + +func systemdCreate(path string, c *systemdDbus.Conn) error { + slice, name := filepath.Split(path) + slice = strings.TrimSuffix(slice, "/") + + var lastError error + for i := 0; i < 2; i++ { + properties := []systemdDbus.Property{ + systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)), + systemdDbus.PropWants(slice), + } + pMap := map[string]bool{ + "DefaultDependencies": false, + "MemoryAccounting": true, + "CPUAccounting": true, + "BlockIOAccounting": true, + } + if i == 0 { + pMap["Delegate"] = true + } + for k, v := range pMap { + p := systemdDbus.Property{ + Name: k, + Value: dbus.MakeVariant(v), + } + properties = append(properties, p) + } + + ch := make(chan string) + _, err := c.StartTransientUnitContext(context.TODO(), name, "replace", properties, ch) + if err != nil { + lastError = err + continue + } + <-ch + return nil + } + return lastError +} + +/* + systemdDestroyConn is copied from containerd/cgroups/systemd.go file, that + has the following license: + + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +func systemdDestroyConn(path string, c *systemdDbus.Conn) error { + name := filepath.Base(path) + + ch := make(chan string) + _, err := c.StopUnitContext(context.TODO(), name, "replace", ch) + if err != nil { + return err + } + <-ch + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_linux.go b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_linux.go new file mode 100644 index 00000000000..749c89932d9 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_linux.go @@ -0,0 +1,27 @@ +package cgroupv2 + +import ( + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + isCgroupV2Once sync.Once + isCgroupV2 bool + isCgroupV2Err error +) + +// Enabled returns whether we are running on cgroup v2 +func Enabled() (bool, error) { + isCgroupV2Once.Do(func() { + var st syscall.Statfs_t + if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil { + isCgroupV2, isCgroupV2Err = false, err + } else { + isCgroupV2, isCgroupV2Err = st.Type == unix.CGROUP2_SUPER_MAGIC, nil + } + }) + return isCgroupV2, isCgroupV2Err +} diff --git a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go new file mode 100644 index 00000000000..f61bd3bb26b --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go @@ -0,0 +1,9 @@ +//go:build !linux +// +build !linux + +package cgroupv2 + +// Enabled returns whether we are running on cgroup v2 +func Enabled() (bool, error) { + return false, nil +} diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go new file mode 100644 index 00000000000..a86eca88efd --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -0,0 +1,1308 @@ +package config + +import ( + "fmt" + "io/fs" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/BurntSushi/toml" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/capabilities" + "github.com/containers/storage/pkg/unshare" + units "github.com/docker/go-units" + selinux "github.com/opencontainers/selinux/go-selinux" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + // _configPath is the path to the containers/containers.conf + // inside a given config directory. 
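+	// With this value, the constants below expand to
+	// /usr/share/containers/containers.conf and
+	// /etc/containers/containers.conf, plus (relative to the rootless
+	// user's home) .config/containers/containers.conf; systemConfigs()
+	// later in this file reads them in that order, so later files override
+	// earlier ones.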
+	_configPath = "containers/containers.conf"
+	// DefaultContainersConfig holds the default containers config path
+	DefaultContainersConfig = "/usr/share/" + _configPath
+	// OverrideContainersConfig holds the default config path overridden by the root user
+	OverrideContainersConfig = "/etc/" + _configPath
+	// UserOverrideContainersConfig holds the containers config path overridden by the rootless user
+	UserOverrideContainersConfig = ".config/" + _configPath
+)
+
+// RuntimeStateStore is a constant indicating which state store implementation
+// should be used by engine
+type RuntimeStateStore int
+
+const (
+	// InvalidStateStore is an invalid state store
+	InvalidStateStore RuntimeStateStore = iota
+	// InMemoryStateStore is an in-memory state that will not persist data
+	// on containers and pods between engine instances or after system
+	// reboot
+	InMemoryStateStore RuntimeStateStore = iota
+	// SQLiteStateStore is a state backed by a SQLite database
+	// It is presently disabled
+	SQLiteStateStore RuntimeStateStore = iota
+	// BoltDBStateStore is a state backed by a BoltDB database
+	BoltDBStateStore RuntimeStateStore = iota
+)
+
+// ProxyEnv is a list of Proxy Environment variables
+var ProxyEnv = []string{
+	"http_proxy",
+	"https_proxy",
+	"ftp_proxy",
+	"no_proxy",
+	"HTTP_PROXY",
+	"HTTPS_PROXY",
+	"FTP_PROXY",
+	"NO_PROXY",
+}
+
+// Config contains configuration options for container tools
+type Config struct {
+	// Containers specify settings that configure how containers will run on the system
+	Containers ContainersConfig `toml:"containers"`
+	// Engine specifies how the container engine based on Engine will run
+	Engine EngineConfig `toml:"engine"`
+	// Machine specifies configurations of podman machine VMs
+	Machine MachineConfig `toml:"machine"`
+	// Network section defines the configuration of CNI Plugins
+	Network NetworkConfig `toml:"network"`
+	// Secret section defines configurations for the secret management
+	Secrets SecretConfig `toml:"secrets"`
+	// ConfigMap section defines configurations for the configmaps management
+	ConfigMaps ConfigMapConfig `toml:"configmaps"`
+}
+
+// ContainersConfig represents the "containers" TOML config table
+// containers global options for containers tools
+type ContainersConfig struct {
+
+	// Devices to add to all containers
+	Devices []string `toml:"devices,omitempty"`
+
+	// Volumes to add to all containers
+	Volumes []string `toml:"volumes,omitempty"`
+
+	// ApparmorProfile is the apparmor profile name which is used as the
+	// default for the runtime.
+	ApparmorProfile string `toml:"apparmor_profile,omitempty"`
+
+	// Annotations to add to all containers
+	Annotations []string `toml:"annotations,omitempty"`
+
+	// BaseHostsFile is the path to a hosts file, the entries from this file
+	// are added to the containers hosts file. As special value "image" is
+	// allowed which uses the /etc/hosts file from within the image and "none"
+	// which uses no base file at all. If it is empty we should default
+	// to /etc/hosts.
+	BaseHostsFile string `toml:"base_hosts_file,omitempty"`
+
+	// Default way to create a cgroup namespace for the container
+	CgroupNS string `toml:"cgroupns,omitempty"`
+
+	// Default cgroup configuration
+	Cgroups string `toml:"cgroups,omitempty"`
+
+	// Capabilities to add to all containers.
+	DefaultCapabilities []string `toml:"default_capabilities,omitempty"`
+
+	// Sysctls to add to all containers.
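+	// Each entry is a "name=value" pair, e.g. the commonly shipped default
+	// "net.ipv4.ping_group_range=0 0".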
+	DefaultSysctls []string `toml:"default_sysctls,omitempty"`
+
+	// DefaultUlimits specifies the default ulimits to apply to containers
+	DefaultUlimits []string `toml:"default_ulimits,omitempty"`
+
+	// DefaultMountsFile is the path to the default mounts file for testing
+	DefaultMountsFile string `toml:"-"`
+
+	// DNSServers set default DNS servers.
+	DNSServers []string `toml:"dns_servers,omitempty"`
+
+	// DNSOptions set default DNS options.
+	DNSOptions []string `toml:"dns_options,omitempty"`
+
+	// DNSSearches set default DNS search domains.
+	DNSSearches []string `toml:"dns_searches,omitempty"`
+
+	// EnableKeyring tells the container engines whether to create
+	// a kernel keyring for use within the container
+	EnableKeyring bool `toml:"keyring,omitempty"`
+
+	// EnableLabeling tells the container engines whether to use MAC
+	// Labeling to separate containers (SELinux)
+	EnableLabeling bool `toml:"label,omitempty"`
+
+	// Env is the environment variable list for container process.
+	Env []string `toml:"env,omitempty"`
+
+	// EnvHost passes all host environment variables into the container.
+	EnvHost bool `toml:"env_host,omitempty"`
+
+	// HostContainersInternalIP is used to set a specific host.containers.internal ip.
+	HostContainersInternalIP string `toml:"host_containers_internal_ip,omitempty"`
+
+	// HTTPProxy is the proxy environment variable list to apply to container process
+	HTTPProxy bool `toml:"http_proxy,omitempty"`
+
+	// Init tells container runtimes whether to run init inside the
+	// container that forwards signals and reaps processes.
+	Init bool `toml:"init,omitempty"`
+
+	// InitPath is the path for init to run if the Init bool is enabled
+	InitPath string `toml:"init_path,omitempty"`
+
+	// IPCNS indicates how to create an IPC namespace for the container
+	IPCNS string `toml:"ipcns,omitempty"`
+
+	// LogDriver for the container. For example: k8s-file and journald
+	LogDriver string `toml:"log_driver,omitempty"`
+
+	// LogSizeMax is the maximum number of bytes after which the log file
+	// will be truncated. It can be expressed as a human-friendly string
+	// that is parsed to bytes.
+	// Negative values indicate that the log file won't be truncated.
+	LogSizeMax int64 `toml:"log_size_max,omitempty,omitzero"`
+
+	// Specifies default format tag for container log messages.
+	// This is useful for creating a specific tag for container log messages.
+	// Containers logs default to truncated container ID as a tag.
+	LogTag string `toml:"log_tag,omitempty"`
+
+	// NetNS indicates how to create a network namespace for the container
+	NetNS string `toml:"netns,omitempty"`
+
+	// NoHosts tells container engine whether to create its own /etc/hosts
+	NoHosts bool `toml:"no_hosts,omitempty"`
+
+	// PidsLimit is the number of processes each container is restricted to
+	// by the cgroup process number controller.
+	PidsLimit int64 `toml:"pids_limit,omitempty,omitzero"`
+
+	// PidNS indicates how to create a pid namespace for the container
+	PidNS string `toml:"pidns,omitempty"`
+
+	// Copy the content from the underlying image into the newly created
+	// volume when the container is created instead of when it is started.
+	// If false, the container engine will not copy the content until
+	// the container is started. Setting it to true may have negative
+	// performance implications.
+	PrepareVolumeOnCreate bool `toml:"prepare_volume_on_create,omitempty"`
+
+	// SeccompProfile is the seccomp.json profile path which is used as the
+	// default for the runtime.
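+	// When unset, engines commonly fall back to the profile installed at
+	// /usr/share/containers/seccomp.json.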
+	SeccompProfile string `toml:"seccomp_profile,omitempty"`
+
+	// ShmSize holds the size of /dev/shm.
+	ShmSize string `toml:"shm_size,omitempty"`
+
+	// TZ sets the timezone inside the container
+	TZ string `toml:"tz,omitempty"`
+
+	// Umask is the umask inside the container.
+	Umask string `toml:"umask,omitempty"`
+
+	// UTSNS indicates how to create a UTS namespace for the container
+	UTSNS string `toml:"utsns,omitempty"`
+
+	// UserNS indicates how to create a User namespace for the container
+	UserNS string `toml:"userns,omitempty"`
+
+	// UserNSSize how many UIDs to allocate for automatically created UserNS
+	UserNSSize int `toml:"userns_size,omitempty,omitzero"`
+}
+
+// EngineConfig contains configuration options used to set up an engine runtime
+type EngineConfig struct {
+	// CgroupCheck indicates the configuration has been rewritten after an
+	// upgrade to Fedora 31 to change the default OCI runtime for cgroup v2.
+	CgroupCheck bool `toml:"cgroup_check,omitempty"`
+
+	// CgroupManager is the cgroup manager to use. Valid values are "cgroupfs"
+	// and "systemd".
+	CgroupManager string `toml:"cgroup_manager,omitempty"`
+
+	// NOTE: when changing this struct, make sure to update (*Config).Merge().
+
+	// ConmonEnvVars are environment variables to pass to the Conmon binary
+	// when it is launched.
+	ConmonEnvVars []string `toml:"conmon_env_vars,omitempty"`
+
+	// ConmonPath is the path to the Conmon binary used for managing containers.
+	// The first path pointing to a valid file will be used.
+	ConmonPath []string `toml:"conmon_path,omitempty"`
+
+	// CompatAPIEnforceDockerHub enforces using docker.io for completing
+	// short names in Podman's compatibility REST API. Note that this will
+	// ignore unqualified-search-registries and short-name aliases defined
+	// in containers-registries.conf(5).
+	CompatAPIEnforceDockerHub bool `toml:"compat_api_enforce_docker_hub,omitempty"`
+
+	// DetachKeys is the sequence of keys used to detach a container.
+	DetachKeys string `toml:"detach_keys,omitempty"`
+
+	// EnablePortReservation determines whether engine will reserve ports on the
+	// host when they are forwarded to containers. When enabled, when ports are
+	// forwarded to containers, they are held open by conmon as long as the
+	// container is running, ensuring that they cannot be reused by other
+	// programs on the host. However, this can cause significant memory usage if
+	// a container has many ports forwarded to it. Disabling this can save
+	// memory.
+	EnablePortReservation bool `toml:"enable_port_reservation,omitempty"`
+
+	// Environment variables to be used when running the container engine (e.g., Podman, Buildah). For example "http_proxy=internal.proxy.company.com"
+	Env []string `toml:"env,omitempty"`
+
+	// EventsLogFilePath is where the events log is stored.
+	EventsLogFilePath string `toml:"events_logfile_path,omitempty"`
+
+	// EventsLogFileMaxSize sets the maximum size for the events log. When the limit is exceeded,
+	// the logfile is rotated and the old one is deleted.
+	EventsLogFileMaxSize eventsLogMaxSize `toml:"events_logfile_max_size,omitzero"`
+
+	// EventsLogger determines where events should be logged.
+	EventsLogger string `toml:"events_logger,omitempty"`
+
+	// graphRoot internally stores the location of the graphroot
+	graphRoot string
+
+	// HelperBinariesDir is a list of directories which are used to search for
+	// helper binaries.
+	HelperBinariesDir []string `toml:"helper_binaries_dir"`
+
+	// HooksDir holds paths to the directories containing hooks
+	// configuration files. When the same filename is present in
+	// multiple directories, the file in the directory listed last in
+	// this slice takes precedence.
+	HooksDir []string `toml:"hooks_dir,omitempty"`
+
+	// ImageBuildFormat (DEPRECATED) indicates the default image format for
+	// building container images. Use ImageDefaultFormat instead.
+	ImageBuildFormat string `toml:"image_build_format,omitempty"`
+
+	// ImageDefaultTransport is the default transport method used to fetch
+	// images.
+	ImageDefaultTransport string `toml:"image_default_transport,omitempty"`
+
+	// ImageParallelCopies indicates the maximum number of image layers
+	// to be copied simultaneously. If this is zero, container engines
+	// will fall back to containers/image defaults.
+	ImageParallelCopies uint `toml:"image_parallel_copies,omitempty,omitzero"`
+
+	// ImageDefaultFormat specifies the manifest type (oci, v2s2, or v2s1)
+	// to use when pulling, pushing, building container images. By default
+	// images pulled and pushed match the format of the source image.
+	// Building/committing defaults to OCI.
+	ImageDefaultFormat string `toml:"image_default_format,omitempty"`
+
+	// InfraCommand is the command run to start up a pod infra container.
+	InfraCommand string `toml:"infra_command,omitempty"`
+
+	// InfraImage is the image a pod infra container will use to manage
+	// namespaces.
+	InfraImage string `toml:"infra_image,omitempty"`
+
+	// InitPath is the path to the container-init binary.
+	InitPath string `toml:"init_path,omitempty"`
+
+	// LockType is the type of locking to use.
+	LockType string `toml:"lock_type,omitempty"`
+
+	// MachineEnabled indicates if Podman is running in a podman-machine VM
+	//
+	// This method is soft deprecated, use machine.IsPodmanMachine instead
+	MachineEnabled bool `toml:"machine_enabled,omitempty"`
+
+	// MultiImageArchive - if true, the container engine allows for storing
+	// archives (e.g., of the docker-archive transport) with multiple
+	// images. By default, Podman creates single-image archives.
+	MultiImageArchive bool `toml:"multi_image_archive,omitempty"`
+
+	// Namespace is the engine namespace to use. Namespaces are used to create
+	// scopes to separate containers and pods in the state. When namespace is
+	// set, engine will only view containers and pods in the same namespace. All
+	// containers and pods created will default to the namespace set here. A
+	// namespace of "", the empty string, is equivalent to no namespace, and all
+	// containers and pods will be visible. The default namespace is "".
+	Namespace string `toml:"namespace,omitempty"`
+
+	// NetworkCmdPath is the path to the slirp4netns binary.
+	NetworkCmdPath string `toml:"network_cmd_path,omitempty"`
+
+	// NetworkCmdOptions are the default options to pass to the slirp4netns binary.
+	// For example "allow_host_loopback=true"
+	NetworkCmdOptions []string `toml:"network_cmd_options,omitempty"`
+
+	// NoPivotRoot sets whether to set no-pivot-root in the OCI runtime.
+	NoPivotRoot bool `toml:"no_pivot_root,omitempty"`
+
+	// NumLocks is the number of locks to make available for containers and
+	// pods.
+	NumLocks uint32 `toml:"num_locks,omitempty,omitzero"`
+
+	// OCIRuntime is the OCI runtime to use.
+	OCIRuntime string `toml:"runtime,omitempty"`
+
+	// OCIRuntimes are the set of configured OCI runtimes (default is runc).
+	OCIRuntimes map[string][]string `toml:"runtimes,omitempty"`
+
+	// PodExitPolicy determines the behaviour when the last container of a pod exits.
+ PodExitPolicy PodExitPolicy `toml:"pod_exit_policy,omitempty"` + + // PullPolicy determines whether to pull image before creating or running a container + // default is "missing" + PullPolicy string `toml:"pull_policy,omitempty"` + + // Indicates whether the application should be running in Remote mode + Remote bool `toml:"remote,omitempty"` + + // RemoteURI is deprecated, see ActiveService + // RemoteURI containers connection information used to connect to remote system. + RemoteURI string `toml:"remote_uri,omitempty"` + + // RemoteIdentity is deprecated, ServiceDestinations + // RemoteIdentity key file for RemoteURI + RemoteIdentity string `toml:"remote_identity,omitempty"` + + // ActiveService index to Destinations added v2.0.3 + ActiveService string `toml:"active_service,omitempty"` + + // ServiceDestinations mapped by service Names + ServiceDestinations map[string]Destination `toml:"service_destinations,omitempty"` + + // RuntimePath is the path to OCI runtime binary for launching containers. + // The first path pointing to a valid file will be used This is used only + // when there are no OCIRuntime/OCIRuntimes defined. It is used only to be + // backward compatible with older versions of Podman. + RuntimePath []string `toml:"runtime_path,omitempty"` + + // RuntimeSupportsJSON is the list of the OCI runtimes that support + // --format=json. + RuntimeSupportsJSON []string `toml:"runtime_supports_json,omitempty"` + + // RuntimeSupportsNoCgroups is a list of OCI runtimes that support + // running containers without CGroups. + RuntimeSupportsNoCgroups []string `toml:"runtime_supports_nocgroup,omitempty"` + + // RuntimeSupportsKVM is a list of OCI runtimes that support + // KVM separation for containers. + RuntimeSupportsKVM []string `toml:"runtime_supports_kvm,omitempty"` + + // SetOptions contains a subset of config options. It's used to indicate if + // a given option has either been set by the user or by the parsed + // configuration file. If not, the corresponding option might be + // overwritten by values from the database. This behavior guarantees + // backwards compat with older version of libpod and Podman. + SetOptions + + // SignaturePolicyPath is the path to a signature policy to use for + // validating images. If left empty, the containers/image default signature + // policy will be used. + SignaturePolicyPath string `toml:"-"` + + // SDNotify tells container engine to allow containers to notify the host systemd of + // readiness using the SD_NOTIFY mechanism. + SDNotify bool `toml:"-"` + + // StateType is the type of the backing state store. Avoid using multiple + // values for this with the same containers/storage configuration on the + // same system. Different state types do not interact, and each will see a + // separate set of containers, which may cause conflicts in + // containers/storage. As such this is not exposed via the config file. + StateType RuntimeStateStore `toml:"-"` + + // ServiceTimeout is the number of seconds to wait without a connection + // before the `podman system service` times out and exits + ServiceTimeout uint `toml:"service_timeout,omitempty,omitzero"` + + // StaticDir is the path to a persistent directory to store container + // files. + StaticDir string `toml:"static_dir,omitempty"` + + // StopTimeout is the number of seconds to wait for container to exit + // before sending kill signal. 
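+	// The shipped defaults commonly use 10 seconds here.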
+ StopTimeout uint `toml:"stop_timeout,omitempty,omitzero"` + + // ExitCommandDelay is the number of seconds to wait for the exit + // command to be send to the API process on the server. + ExitCommandDelay uint `toml:"exit_command_delay,omitempty,omitzero"` + + // ImageCopyTmpDir is the default location for storing temporary + // container image content, Can be overridden with the TMPDIR + // environment variable. If you specify "storage", then the + // location of the container/storage tmp directory will be used. + ImageCopyTmpDir string `toml:"image_copy_tmp_dir,omitempty"` + + // TmpDir is the path to a temporary directory to store per-boot container + // files. Must be stored in a tmpfs. + TmpDir string `toml:"tmp_dir,omitempty"` + + // VolumePath is the default location that named volumes will be created + // under. This convention is followed by the default volume driver, but + // may not be by other drivers. + VolumePath string `toml:"volume_path,omitempty"` + + // VolumePlugins is a set of plugins that can be used as the backend for + // Podman named volumes. Each volume is specified as a name (what Podman + // will refer to the plugin as) mapped to a path, which must point to a + // Unix socket that conforms to the Volume Plugin specification. + VolumePlugins map[string]string `toml:"volume_plugins,omitempty"` + + // ChownCopiedFiles tells the container engine whether to chown files copied + // into a container to the container's primary uid/gid. + ChownCopiedFiles bool `toml:"chown_copied_files,omitempty"` + + // CompressionFormat is the compression format used to compress image layers. + CompressionFormat string `toml:"compression_format,omitempty"` +} + +// SetOptions contains a subset of options in a Config. It's used to indicate if +// a given option has either been set by the user or by a parsed engine +// configuration file. If not, the corresponding option might be overwritten by +// values from the database. This behavior guarantees backwards compat with +// older version of libpod and Podman. +type SetOptions struct { + // StorageConfigRunRootSet indicates if the RunRoot has been explicitly set + // by the config or by the user. It's required to guarantee backwards + // compatibility with older versions of libpod for which we must query the + // database configuration. Not included in the on-disk config. + StorageConfigRunRootSet bool `toml:"-"` + + // StorageConfigGraphRootSet indicates if the RunRoot has been explicitly + // set by the config or by the user. It's required to guarantee backwards + // compatibility with older versions of libpod for which we must query the + // database configuration. Not included in the on-disk config. + StorageConfigGraphRootSet bool `toml:"-"` + + // StorageConfigGraphDriverNameSet indicates if the GraphDriverName has been + // explicitly set by the config or by the user. It's required to guarantee + // backwards compatibility with older versions of libpod for which we must + // query the database configuration. Not included in the on-disk config. + StorageConfigGraphDriverNameSet bool `toml:"-"` + + // StaticDirSet indicates if the StaticDir has been explicitly set by the + // config or by the user. It's required to guarantee backwards compatibility + // with older versions of libpod for which we must query the database + // configuration. Not included in the on-disk config. + StaticDirSet bool `toml:"-"` + + // VolumePathSet indicates if the VolumePath has been explicitly set by the + // config or by the user. 
It's required to guarantee backwards compatibility + // with older versions of libpod for which we must query the database + // configuration. Not included in the on-disk config. + VolumePathSet bool `toml:"-"` + + // TmpDirSet indicates if the TmpDir has been explicitly set by the config + // or by the user. It's required to guarantee backwards compatibility with + // older versions of libpod for which we must query the database + // configuration. Not included in the on-disk config. + TmpDirSet bool `toml:"-"` +} + +// NetworkConfig represents the "network" TOML config table +type NetworkConfig struct { + // NetworkBackend determines what backend should be used for Podman's + // networking. + NetworkBackend string `toml:"network_backend,omitempty"` + + // CNIPluginDirs is where CNI plugin binaries are stored. + CNIPluginDirs []string `toml:"cni_plugin_dirs,omitempty"` + + // DefaultNetwork is the network name of the default network + // to attach pods to. + DefaultNetwork string `toml:"default_network,omitempty"` + + // DefaultSubnet is the subnet to be used for the default network. + // If a network with the name given in DefaultNetwork is not present + // then a new network using this subnet will be created. + // Must be a valid IPv4 CIDR block. + DefaultSubnet string `toml:"default_subnet,omitempty"` + + // DefaultSubnetPools is a list of subnets and size which are used to + // allocate subnets automatically for podman network create. + // It will iterate through the list and will pick the first free subnet + // with the given size. This is only used for ipv4 subnets, ipv6 subnets + // are always assigned randomly. + DefaultSubnetPools []SubnetPool `toml:"default_subnet_pools,omitempty"` + + // NetworkConfigDir is where network configuration files are stored. + NetworkConfigDir string `toml:"network_config_dir,omitempty"` +} + +type SubnetPool struct { + // Base is a bigger subnet which will be used to allocate a subnet with + // the given size. + Base *types.IPNet `toml:"base,omitempty"` + // Size is the CIDR for the new subnet. It must be equal or small + // than the CIDR from the base subnet. + Size int `toml:"size,omitempty"` +} + +// SecretConfig represents the "secret" TOML config table +type SecretConfig struct { + // Driver specifies the secret driver to use. + // Current valid value: + // * file + // * pass + Driver string `toml:"driver,omitempty"` + // Opts contains driver specific options + Opts map[string]string `toml:"opts,omitempty"` +} + +// ConfigMapConfig represents the "configmap" TOML config table +type ConfigMapConfig struct { + // Driver specifies the configmap driver to use. + // Current valid value: + // * file + // * pass + Driver string `toml:"driver,omitempty"` + // Opts contains driver specific options + Opts map[string]string `toml:"opts,omitempty"` +} + +// MachineConfig represents the "machine" TOML config table +type MachineConfig struct { + // Number of CPU's a machine is created with. + CPUs uint64 `toml:"cpus,omitempty,omitzero"` + // DiskSize is the size of the disk in GB created when init-ing a podman-machine VM + DiskSize uint64 `toml:"disk_size,omitempty,omitzero"` + // MachineImage is the image used when init-ing a podman-machine VM + Image string `toml:"image,omitempty"` + // Memory in MB a machine is created with. + Memory uint64 `toml:"memory,omitempty,omitzero"` + // User to use for rootless podman when init-ing a podman machine VM + User string `toml:"user,omitempty"` + // Volumes are host directories mounted into the VM by default. 
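+	// Entries take the "source:target" form; a commonly shipped default is
+	// "$HOME:$HOME".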
+    Volumes []string `toml:"volumes"`
+}
+
+// Destination represents a destination for the remote service
+type Destination struct {
+    // URI, required. Example: ssh://root@example.com:22/run/podman/podman.sock
+    URI string `toml:"uri"`
+
+    // Identity file with ssh key, optional
+    Identity string `toml:"identity,omitempty"`
+}
+
+// NewConfig creates a new Config. It starts with an empty config and, if
+// specified, merges the config at `userConfigPath`. Depending on whether we're
+// running as root or rootless, we then merge the system configuration followed
+// by merging the default config (hard-coded default in memory).
+// Note that the OCI runtime is hard-set to `crun` if we're running on a system
+// with cgroup v2. Other OCI runtimes do not yet support cgroup v2. This
+// might change in the future.
+func NewConfig(userConfigPath string) (*Config, error) {
+    // Generate the default config for the system
+    config, err := DefaultConfig()
+    if err != nil {
+        return nil, err
+    }
+
+    // Now, gather the system configs and merge them as needed.
+    configs, err := systemConfigs()
+    if err != nil {
+        return nil, errors.Wrap(err, "finding config on system")
+    }
+    for _, path := range configs {
+        // Merge changes in later configs with the previous configs.
+        // Each config file that specifies fields will override the
+        // previous values.
+        if err = readConfigFromFile(path, config); err != nil {
+            return nil, errors.Wrapf(err, "reading system config %q", path)
+        }
+        logrus.Debugf("Merged system config %q", path)
+        logrus.Tracef("%+v", config)
+    }
+
+    // If the caller specified a config path to use, then we read it to
+    // override the system defaults.
+    if userConfigPath != "" {
+        var err error
+        // readConfigFromFile reads in container config in the specified
+        // file and then merges changes with the current default.
+        if err = readConfigFromFile(userConfigPath, config); err != nil {
+            return nil, errors.Wrapf(err, "reading user config %q", userConfigPath)
+        }
+        logrus.Debugf("Merged user config %q", userConfigPath)
+        logrus.Tracef("%+v", config)
+    }
+    config.addCAPPrefix()
+
+    if err := config.Validate(); err != nil {
+        return nil, err
+    }
+
+    if err := config.setupEnv(); err != nil {
+        return nil, err
+    }
+
+    return config, nil
+}
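As a rough usage sketch (illustrative only, not part of the vendored file): callers outside this package normally obtain the merged result through the exported Default() helper further below, which wraps NewConfig(""):

    package main

    import (
        "fmt"

        "github.com/containers/common/pkg/config"
    )

    func main() {
        // Default() caches the merged view of the distro, system and, for
        // rootless users, per-user containers.conf files.
        cfg, err := config.Default()
        if err != nil {
            panic(err)
        }
        fmt.Println(cfg.Engine.OCIRuntime) // e.g. "crun" on a cgroup v2 host
    }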
+
+// readConfigFromFile reads the specified config file at `path` and attempts to
+// unmarshal its content into a Config. The config param specifies the previous
+// default config. If the TOML file at `path` only specifies a few fields, the
+// defaults from the config parameter will be used for all other fields.
+func readConfigFromFile(path string, config *Config) error {
+    logrus.Tracef("Reading configuration file %q", path)
+    meta, err := toml.DecodeFile(path, config)
+    if err != nil {
+        return errors.Wrapf(err, "decode configuration %v", path)
+    }
+    keys := meta.Undecoded()
+    if len(keys) > 0 {
+        logrus.Debugf("Failed to decode the keys %q from %q.", keys, path)
+    }
+
+    return nil
+}
+
+// addConfigs searches one level deep in dirPath for config files.
+// If dirPath does not exist, addConfigs will return nil.
+func addConfigs(dirPath string, configs []string) ([]string, error) {
+    newConfigs := []string{}
+
+    err := filepath.WalkDir(dirPath,
+        // WalkFunc to read additional configs
+        func(path string, d fs.DirEntry, err error) error {
+            switch {
+            case err != nil:
+                // return error (could be a permission problem)
+                return err
+            case d.IsDir():
+                if path != dirPath {
+                    // make sure to not recurse into sub-directories
+                    return filepath.SkipDir
+                }
+                // ignore directories
+                return nil
+            default:
+                // only add *.conf files
+                if strings.HasSuffix(path, ".conf") {
+                    newConfigs = append(newConfigs, path)
+                }
+                return nil
+            }
+        },
+    )
+    if os.IsNotExist(err) {
+        err = nil
+    }
+    sort.Strings(newConfigs)
+    return append(configs, newConfigs...), err
+}
+
+// Returns the list of configuration files, if they exist, in order of
+// hierarchy. The files are read in order and each new file can/will override
+// previous file settings.
+func systemConfigs() ([]string, error) {
+    var err error
+    configs := []string{}
+    path := os.Getenv("CONTAINERS_CONF")
+    if path != "" {
+        if _, err := os.Stat(path); err != nil {
+            return nil, errors.Wrap(err, "CONTAINERS_CONF file")
+        }
+        return append(configs, path), nil
+    }
+    if _, err := os.Stat(DefaultContainersConfig); err == nil {
+        configs = append(configs, DefaultContainersConfig)
+    }
+    if _, err := os.Stat(OverrideContainersConfig); err == nil {
+        configs = append(configs, OverrideContainersConfig)
+    }
+    configs, err = addConfigs(OverrideContainersConfig+".d", configs)
+    if err != nil {
+        return nil, err
+    }
+
+    path, err = ifRootlessConfigPath()
+    if err != nil {
+        return nil, err
+    }
+    if path != "" {
+        if _, err := os.Stat(path); err == nil {
+            configs = append(configs, path)
+        }
+        configs, err = addConfigs(path+".d", configs)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return configs, nil
+}
+
+// CheckCgroupsAndAdjustConfig checks if we're running rootless with the systemd
+// cgroup manager. If the user session isn't available, we switch the
+// cgroup manager to cgroupfs. Note that this only applies to rootless.
+func (c *Config) CheckCgroupsAndAdjustConfig() {
+    if !unshare.IsRootless() || c.Engine.CgroupManager != SystemdCgroupsManager {
+        return
+    }
+
+    session := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
+    hasSession := session != ""
+    if hasSession {
+        for _, part := range strings.Split(session, ",") {
+            if strings.HasPrefix(part, "unix:path=") {
+                _, err := os.Stat(strings.TrimPrefix(part, "unix:path="))
+                hasSession = err == nil
+                break
+            }
+        }
+    }
+
+    if !hasSession && unshare.GetRootlessUID() != 0 {
+        logrus.Warningf("The cgroupv2 manager is set to systemd but there is no systemd user session available")
+        logrus.Warningf("To use systemd, you may need to log in using a user session")
+        logrus.Warningf("Alternatively, you can enable lingering with: `loginctl enable-linger %d` (possibly as root)", unshare.GetRootlessUID())
+        logrus.Warningf("Falling back to --cgroup-manager=cgroupfs")
+        c.Engine.CgroupManager = CgroupfsCgroupsManager
+    }
+}
+
+func (c *Config) addCAPPrefix() {
+    toCAPPrefixed := func(cap string) string {
+        if !strings.HasPrefix(strings.ToLower(cap), "cap_") {
+            return "CAP_" + strings.ToUpper(cap)
+        }
+        return cap
+    }
+    for i, cap := range c.Containers.DefaultCapabilities {
+        c.Containers.DefaultCapabilities[i] = toCAPPrefixed(cap)
+    }
+}
+
+// Validate is the main entry point for library configuration validation.
+func (c *Config) Validate() error {
+    if err := c.Containers.Validate(); err != nil {
+        return errors.Wrap(err, "validating containers config")
+    }
+
+    if !c.Containers.EnableLabeling {
+        selinux.SetDisabled()
+    }
+
+    if err := c.Engine.Validate(); err != nil {
+        return errors.Wrap(err, "validating engine configs")
+    }
+
+    if err := c.Network.Validate(); err != nil {
+        return errors.Wrap(err, "validating network configs")
+    }
+
+    return nil
+}
+
+func (c *EngineConfig) findRuntime() string {
+    // Search for crun first followed by runc, kata, runsc
+    for _, name := range []string{"crun", "runc", "kata", "runsc"} {
+        for _, v := range c.OCIRuntimes[name] {
+            if _, err := os.Stat(v); err == nil {
+                return name
+            }
+        }
+        if path, err := exec.LookPath(name); err == nil {
+            logrus.Debugf("Found default OCI runtime %s path via PATH environment variable", path)
+            return name
+        }
+    }
+    return ""
+}
+
+// Validate is the main entry point for Engine configuration validation.
+// It returns an `error` on validation failure, otherwise `nil`.
+func (c *EngineConfig) Validate() error {
+    if err := c.validatePaths(); err != nil {
+        return err
+    }
+
+    // Check if the pullPolicy from containers.conf is valid;
+    // return an error if it is invalid.
+    pullPolicy := strings.ToLower(c.PullPolicy)
+    if _, err := ValidatePullPolicy(pullPolicy); err != nil {
+        return errors.Wrapf(err, "invalid pull type from containers.conf %q", c.PullPolicy)
+    }
+    return nil
+}
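A rough illustration of the pull-policy check (a sketch under the assumption that the documented policies "always", "missing", "newer" and "never" are the accepted values; not part of the vendored file):

    // Within this package: Validate lower-cases the policy first, so
    // mixed-case values from containers.conf are accepted.
    func validatePullPolicyExample() error {
        cfg := EngineConfig{PullPolicy: "Missing"}
        // nil: "Missing" normalizes to "missing", the default policy.
        // A value such as "sometimes" would instead return an
        // "invalid pull type from containers.conf" error.
        return cfg.Validate()
    }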
+
+// Validate is the main entry point for containers configuration validation.
+// It returns an `error` on validation failure, otherwise `nil`.
+func (c *ContainersConfig) Validate() error {
+    if err := c.validateUlimits(); err != nil {
+        return err
+    }
+
+    if err := c.validateDevices(); err != nil {
+        return err
+    }
+
+    if err := c.validateTZ(); err != nil {
+        return err
+    }
+
+    if err := c.validateUmask(); err != nil {
+        return err
+    }
+
+    if c.LogSizeMax >= 0 && c.LogSizeMax < OCIBufSize {
+        return errors.Errorf("log size max should be negative or >= %d", OCIBufSize)
+    }
+
+    if _, err := units.FromHumanSize(c.ShmSize); err != nil {
+        return errors.Errorf("invalid --shm-size %s, %q", c.ShmSize, err)
+    }
+
+    return nil
+}
+
+// Validate is the main entry point for network configuration validation.
+// It returns an `error` on validation failure, otherwise `nil`.
+func (c *NetworkConfig) Validate() error {
+    if &c.DefaultSubnetPools != &DefaultSubnetPools {
+        for _, pool := range c.DefaultSubnetPools {
+            if pool.Base.IP.To4() == nil {
+                return errors.Errorf("invalid subnet pool ip %q", pool.Base.IP)
+            }
+            ones, _ := pool.Base.IPNet.Mask.Size()
+            if ones > pool.Size {
+                return errors.Errorf("invalid subnet pool, size is bigger than subnet %q", &pool.Base.IPNet)
+            }
+            if pool.Size > 32 {
+                return errors.New("invalid subnet pool size, must be between 0-32")
+            }
+        }
+    }
+
+    if stringsEq(c.CNIPluginDirs, DefaultCNIPluginDirs) {
+        return nil
+    }
+
+    for _, pluginDir := range c.CNIPluginDirs {
+        if err := isDirectory(pluginDir); err == nil {
+            return nil
+        }
+    }
+
+    return errors.Errorf("invalid cni_plugin_dirs: %s", strings.Join(c.CNIPluginDirs, ","))
+}
+
+// FindConmon iterates over (*Config).ConmonPath and returns the path
+// to the first (version-)matching conmon binary. If none is found, we try
+// a path lookup of "conmon".
+func (c *Config) FindConmon() (string, error) {
+    foundOutdatedConmon := false
+    for _, path := range c.Engine.ConmonPath {
+        stat, err := os.Stat(path)
+        if err != nil {
+            continue
+        }
+        if stat.IsDir() {
+            continue
+        }
+        if err := probeConmon(path); err != nil {
+            logrus.Warnf("Conmon at %s invalid: %v", path, err)
+            foundOutdatedConmon = true
+            continue
+        }
+        logrus.Debugf("Using conmon: %q", path)
+        return path, nil
+    }
+
+    // Search the $PATH as last fallback
+    if path, err := exec.LookPath("conmon"); err == nil {
+        if err := probeConmon(path); err != nil {
+            logrus.Warnf("Conmon at %s is invalid: %v", path, err)
+            foundOutdatedConmon = true
+        } else {
+            logrus.Debugf("Using conmon from $PATH: %q", path)
+            return path, nil
+        }
+    }
+
+    if foundOutdatedConmon {
+        return "", errors.Wrapf(ErrConmonOutdated,
+            "please update to v%d.%d.%d or later",
+            _conmonMinMajorVersion, _conmonMinMinorVersion, _conmonMinPatchVersion)
+    }
+
+    return "", errors.Wrapf(ErrInvalidArg,
+        "could not find a working conmon binary (configured options: %v)",
+        c.Engine.ConmonPath)
+}
+
+// GetDefaultEnv returns the environment variables for the container.
+// It will check the HTTPProxy and HostEnv booleans and add the appropriate
+// environment variables to the container.
+func (c *Config) GetDefaultEnv() []string {
+    return c.GetDefaultEnvEx(c.Containers.EnvHost, c.Containers.HTTPProxy)
+}
+
+// GetDefaultEnvEx returns the environment variables for the container.
+// It will check the HTTPProxy and HostEnv boolean parameters and return the appropriate
+// environment variables for the container.
+func (c *Config) GetDefaultEnvEx(envHost, httpProxy bool) []string {
+    var env []string
+    if envHost {
+        env = append(env, os.Environ()...)
+    } else if httpProxy {
+        for _, p := range ProxyEnv {
+            if val, ok := os.LookupEnv(p); ok {
+                env = append(env, fmt.Sprintf("%s=%s", p, val))
+            }
+        }
+    }
+    return append(env, c.Containers.Env...)
+}
+
+// Capabilities returns the capabilities for the container, applying the Add
+// and Drop capability lists to the default capabilities.
+func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []string) ([]string, error) {
+    userNotRoot := func(user string) bool {
+        if user == "" || user == "root" || user == "0" {
+            return false
+        }
+        return true
+    }
+
+    defaultCapabilities := c.Containers.DefaultCapabilities
+    if userNotRoot(user) {
+        defaultCapabilities = []string{}
+    }
+
+    return capabilities.MergeCapabilities(defaultCapabilities, addCapabilities, dropCapabilities)
+}
+
+// Device parses a device mapping string into src, dest & permissions strings.
+// Valid values for device look like:
+// "/dev/sdc"
+// "/dev/sdc:/dev/xvdc"
+// "/dev/sdc:/dev/xvdc:rwm"
+// "/dev/sdc:rm"
+func Device(device string) (src, dst, permissions string, err error) {
+    permissions = "rwm"
+    split := strings.Split(device, ":")
+    switch len(split) {
+    case 3:
+        if !IsValidDeviceMode(split[2]) {
+            return "", "", "", errors.Errorf("invalid device mode: %s", split[2])
+        }
+        permissions = split[2]
+        fallthrough
+    case 2:
+        if IsValidDeviceMode(split[1]) {
+            permissions = split[1]
+        } else {
+            if split[1] == "" || split[1][0] != '/' {
+                return "", "", "", errors.Errorf("invalid device mode: %s", split[1])
+            }
+            dst = split[1]
+        }
+        fallthrough
+    case 1:
+        if !strings.HasPrefix(split[0], "/dev/") {
+            return "", "", "", errors.Errorf("invalid device mode: %s", split[0])
+        }
+        src = split[0]
+    default:
+        return "", "", "", errors.Errorf("invalid device specification: %s", device)
+    }
+
+    if dst == "" {
+        dst = src
+    }
+    return src, dst, permissions, nil
+}
+
+// IsValidDeviceMode checks if the mode for a device is valid or not.
+// A valid mode is a composition of r (read), w (write), and m (mknod).
+func IsValidDeviceMode(mode string) bool {
+    legalDeviceMode := map[rune]bool{
+        'r': true,
+        'w': true,
+        'm': true,
+    }
+    if mode == "" {
+        return false
+    }
+    for _, c := range mode {
+        if !legalDeviceMode[c] {
+            return false
+        }
+        legalDeviceMode[c] = false
+    }
+    return true
+}
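To make the parsing rules concrete, a small package-internal sketch (illustrative only, not part of the vendored file):

    func deviceExamples() {
        // Fully specified mapping: host device, container device, permissions.
        src, dst, perm, _ := Device("/dev/sdc:/dev/xvdc:rwm")
        // src == "/dev/sdc", dst == "/dev/xvdc", perm == "rwm"

        // Omitted parts fall back to defaults: dst defaults to src and
        // permissions default to "rwm".
        src, dst, perm, _ = Device("/dev/sdc")
        // src == "/dev/sdc", dst == "/dev/sdc", perm == "rwm"
        _, _, _ = src, dst, perm
    }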
+
+// resolveHomeDir converts a path referencing the home directory via "~"
+// to an absolute path
+func resolveHomeDir(path string) (string, error) {
+    // check if the path references the home dir to avoid work
+    // don't use strings.HasPrefix(path, "~") as this doesn't match "~" alone
+    // use strings.HasPrefix(path, "~/") to not match "something/~/something"
+    if !(path == "~" || strings.HasPrefix(path, "~/")) {
+        // path does not reference home dir -> Nothing to do
+        return path, nil
+    }
+
+    // only get HomeDir when necessary
+    home, err := unshare.HomeDir()
+    if err != nil {
+        return "", err
+    }
+
+    // replace the first "~" (start of path) with the HomeDir to resolve "~"
+    return strings.Replace(path, "~", home, 1), nil
+}
+
+func rootlessConfigPath() (string, error) {
+    if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
+        return filepath.Join(configHome, _configPath), nil
+    }
+    home, err := unshare.HomeDir()
+    if err != nil {
+        return "", err
+    }
+
+    return filepath.Join(home, UserOverrideContainersConfig), nil
+}
+
+func stringsEq(a, b []string) bool {
+    if len(a) != len(b) {
+        return false
+    }
+
+    for i := range a {
+        if a[i] != b[i] {
+            return false
+        }
+    }
+
+    return true
+}
+
+var (
+    configErr   error
+    configMutex sync.Mutex
+    config      *Config
+)
+
+// Default returns the default container config.
+// Configuration files are read from the following locations:
+// * /usr/share/containers/containers.conf
+// * /etc/containers/containers.conf
+// * $HOME/.config/containers/containers.conf # When run in rootless mode
+// Fields in latter files override defaults set in previous files and the
+// default config.
+// None of these files are required, and not all fields need to be specified
+// in each file, only the fields you want to override.
+// The system default config file locations can be overridden using the
+// CONTAINERS_CONF environment variable. This is usually done for testing.
+func Default() (*Config, error) {
+    configMutex.Lock()
+    defer configMutex.Unlock()
+    if config != nil || configErr != nil {
+        return config, configErr
+    }
+    return defConfig()
+}
+
+func defConfig() (*Config, error) {
+    config, configErr = NewConfig("")
+    return config, configErr
+}
+
+func Path() string {
+    if path := os.Getenv("CONTAINERS_CONF"); path != "" {
+        return path
+    }
+    if unshare.IsRootless() {
+        if rpath, err := rootlessConfigPath(); err == nil {
+            return rpath
+        }
+        return "$HOME/" + UserOverrideContainersConfig
+    }
+    return OverrideContainersConfig
+}
+
+// ReadCustomConfig reads the custom config and only generates a config based
+// on it. If the custom config file does not exist, the function returns an
+// empty config.
+func ReadCustomConfig() (*Config, error) {
+    path, err := customConfigFile()
+    if err != nil {
+        return nil, err
+    }
+    newConfig := &Config{}
+    if _, err := os.Stat(path); err == nil {
+        if err := readConfigFromFile(path, newConfig); err != nil {
+            return nil, err
+        }
+    } else {
+        if !os.IsNotExist(err) {
+            return nil, err
+        }
+    }
+    return newConfig, nil
+}
+
+// Write writes the configuration to the default file
+func (c *Config) Write() error {
+    var err error
+    path, err := customConfigFile()
+    if err != nil {
+        return err
+    }
+    if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
+        return err
+    }
+    configFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
+    if err != nil {
+        return err
+    }
+    defer configFile.Close()
+    enc := toml.NewEncoder(configFile)
+    if err := enc.Encode(c); err != nil {
+        return err
+    }
+    return nil
+}
+
+// Reload cleans the cached config and reloads the configuration from containers.conf files.
+// This function is meant to be used for long-running processes that need to reload potential changes made to
+// the cached containers.conf files.
+func Reload() (*Config, error) {
+    configMutex.Lock()
+    defer configMutex.Unlock()
+    return defConfig()
+}
+
+func (c *Config) ActiveDestination() (uri, identity string, err error) {
+    if uri, found := os.LookupEnv("CONTAINER_HOST"); found {
+        if v, found := os.LookupEnv("CONTAINER_SSHKEY"); found {
+            identity = v
+        }
+        return uri, identity, nil
+    }
+    connEnv := os.Getenv("CONTAINER_CONNECTION")
+    switch {
+    case connEnv != "":
+        d, found := c.Engine.ServiceDestinations[connEnv]
+        if !found {
+            return "", "", errors.Errorf("environment variable CONTAINER_CONNECTION=%q service destination not found", connEnv)
+        }
+        return d.URI, d.Identity, nil
+
+    case c.Engine.ActiveService != "":
+        d, found := c.Engine.ServiceDestinations[c.Engine.ActiveService]
+        if !found {
+            return "", "", errors.Errorf("%q service destination not found", c.Engine.ActiveService)
+        }
+        return d.URI, d.Identity, nil
+    case c.Engine.RemoteURI != "":
+        return c.Engine.RemoteURI, c.Engine.RemoteIdentity, nil
+    }
+    return "", "", errors.New("no service destination configured")
+}
+
+// FindHelperBinary will search the given binary name in the configured directories.
+// If searchPATH is set to true it will also search in $PATH.
+func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) {
+    dir_list := c.Engine.HelperBinariesDir
+
+    // If set, search this directory first. This is used in testing.
+    if dir, found := os.LookupEnv("CONTAINERS_HELPER_BINARY_DIR"); found {
+        dir_list = append([]string{dir}, dir_list...)
+    }
+
+    for _, path := range dir_list {
+        fullpath := filepath.Join(path, name)
+        if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() {
+            return fullpath, nil
+        }
+    }
+    if searchPATH {
+        return exec.LookPath(name)
+    }
+    configHint := "To resolve this error, set the helper_binaries_dir key in the `[engine]` section of containers.conf to the directory containing your helper binaries."
+    if len(c.Engine.HelperBinariesDir) == 0 {
+        return "", errors.Errorf("could not find %q because there are no helper binary directories configured. %s", name, configHint)
+    }
%s", name, c.Engine.HelperBinariesDir, configHint) +} + +// ImageCopyTmpDir default directory to store temporary image files during copy +func (c *Config) ImageCopyTmpDir() (string, error) { + if path, found := os.LookupEnv("TMPDIR"); found { + return path, nil + } + switch c.Engine.ImageCopyTmpDir { + case "": + return "", nil + case "storage": + return filepath.Join(c.Engine.graphRoot, "tmp"), nil + default: + if filepath.IsAbs(c.Engine.ImageCopyTmpDir) { + return c.Engine.ImageCopyTmpDir, nil + } + } + + return "", errors.Errorf("invalid image_copy_tmp_dir value %q (relative paths are not accepted)", c.Engine.ImageCopyTmpDir) +} + +// setupEnv sets the environment variables for the engine +func (c *Config) setupEnv() error { + for _, env := range c.Engine.Env { + splitEnv := strings.SplitN(env, "=", 2) + if len(splitEnv) != 2 { + logrus.Warnf("invalid environment variable for engine %s, valid configuration is KEY=value pair", env) + continue + } + // skip if the env is already defined + if _, ok := os.LookupEnv(splitEnv[0]); ok { + logrus.Debugf("environment variable %s is already defined, skip the settings from containers.conf", splitEnv[0]) + continue + } + if err := os.Setenv(splitEnv[0], splitEnv[1]); err != nil { + return err + } + } + return nil +} + +// eventsLogMaxSize is the type used by EventsLogFileMaxSize +type eventsLogMaxSize uint64 + +// UnmarshalText parses the JSON encoding of eventsLogMaxSize and +// stores it in a value. +func (e *eventsLogMaxSize) UnmarshalText(text []byte) error { + // REMOVE once writing works + if string(text) == "" { + return nil + } + val, err := units.FromHumanSize((string(text))) + if err != nil { + return err + } + if val < 0 { + return fmt.Errorf("events log file max size cannot be negative: %s", string(text)) + } + *e = eventsLogMaxSize(uint64(val)) + return nil +} + +// MarshalText returns the JSON encoding of eventsLogMaxSize. +func (e eventsLogMaxSize) MarshalText() ([]byte, error) { + if uint64(e) == DefaultEventsLogSizeMax || e == 0 { + v := []byte{} + return v, nil + } + return []byte(fmt.Sprintf("%d", e)), nil +} diff --git a/vendor/github.com/containers/common/pkg/config/config_darwin.go b/vendor/github.com/containers/common/pkg/config/config_darwin.go new file mode 100644 index 00000000000..5abb51f30cd --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/config_darwin.go @@ -0,0 +1,30 @@ +package config + +import ( + "os" +) + +// podman remote clients on darwin cannot use unshare.isRootless() to determine the configuration file locations. 
diff --git a/vendor/github.com/containers/common/pkg/config/config_darwin.go b/vendor/github.com/containers/common/pkg/config/config_darwin.go
new file mode 100644
index 00000000000..5abb51f30cd
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_darwin.go
@@ -0,0 +1,30 @@
+package config
+
+import (
+    "os"
+)
+
+// podman remote clients on darwin cannot use unshare.isRootless() to determine the configuration file locations.
+func customConfigFile() (string, error) {
+    if path, found := os.LookupEnv("CONTAINERS_CONF"); found {
+        return path, nil
+    }
+    return rootlessConfigPath()
+}
+
+func ifRootlessConfigPath() (string, error) {
+    return rootlessConfigPath()
+}
+
+var defaultHelperBinariesDir = []string{
+    // Homebrew install paths
+    "/usr/local/opt/podman/libexec",
+    "/opt/homebrew/bin",
+    "/opt/homebrew/opt/podman/libexec",
+    "/usr/local/bin",
+    // default paths
+    "/usr/local/libexec/podman",
+    "/usr/local/lib/podman",
+    "/usr/libexec/podman",
+    "/usr/lib/podman",
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_freebsd.go b/vendor/github.com/containers/common/pkg/config/config_freebsd.go
new file mode 100644
index 00000000000..85404a48ddf
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_freebsd.go
@@ -0,0 +1,25 @@
+package config
+
+import (
+    "os"
+)
+
+// podman remote clients on freebsd cannot use unshare.isRootless() to determine the configuration file locations.
+func customConfigFile() (string, error) {
+    if path, found := os.LookupEnv("CONTAINERS_CONF"); found {
+        return path, nil
+    }
+    return rootlessConfigPath()
+}
+
+func ifRootlessConfigPath() (string, error) {
+    return rootlessConfigPath()
+}
+
+var defaultHelperBinariesDir = []string{
+    "/usr/local/bin",
+    "/usr/local/libexec/podman",
+    "/usr/local/lib/podman",
+    "/usr/libexec/podman",
+    "/usr/lib/podman",
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_linux.go b/vendor/github.com/containers/common/pkg/config/config_linux.go
new file mode 100644
index 00000000000..da0ae871a81
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_linux.go
@@ -0,0 +1,44 @@
+package config
+
+import (
+    "os"
+
+    "github.com/containers/storage/pkg/unshare"
+    selinux "github.com/opencontainers/selinux/go-selinux"
+)
+
+func selinuxEnabled() bool {
+    return selinux.GetEnabled()
+}
+
+func customConfigFile() (string, error) {
+    if path, found := os.LookupEnv("CONTAINERS_CONF"); found {
+        return path, nil
+    }
+    if unshare.IsRootless() {
+        path, err := rootlessConfigPath()
+        if err != nil {
+            return "", err
+        }
+        return path, nil
+    }
+    return OverrideContainersConfig, nil
+}
+
+func ifRootlessConfigPath() (string, error) {
+    if unshare.IsRootless() {
+        path, err := rootlessConfigPath()
+        if err != nil {
+            return "", err
+        }
+        return path, nil
+    }
+    return "", nil
+}
+
+var defaultHelperBinariesDir = []string{
+    "/usr/local/libexec/podman",
+    "/usr/local/lib/podman",
+    "/usr/libexec/podman",
+    "/usr/lib/podman",
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go
new file mode 100644
index 00000000000..bfb9675824b
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_local.go
@@ -0,0 +1,116 @@
+//go:build !remote
+// +build !remote
+
+package config
+
+import (
+    "os"
+    "path/filepath"
+    "regexp"
+    "strings"
+    "syscall"
+
+    units "github.com/docker/go-units"
+    "github.com/pkg/errors"
+)
+
+// isDirectory tests whether the given path exists and is a directory. It
+// follows symlinks.
+func isDirectory(path string) error {
+    path, err := resolveHomeDir(path)
+    if err != nil {
+        return err
+    }
+
+    info, err := os.Stat(path)
+    if err != nil {
+        return err
+    }
+
+    if !info.Mode().IsDir() {
+        // Return a PathError to be consistent with os.Stat().
+        return &os.PathError{
+            Op:   "stat",
+            Path: path,
+            Err:  syscall.ENOTDIR,
+        }
+    }
+
+    return nil
+}
+
+func (c *EngineConfig) validatePaths() error {
+    // Relative paths can cause nasty bugs, because core paths we use could
+    // shift between runs or even between parts of the program; the OCI
+    // runtime uses a different working directory than we do, for example.
+    if c.StaticDir != "" && !filepath.IsAbs(c.StaticDir) {
+        return errors.Errorf("static directory must be an absolute path - instead got %q", c.StaticDir)
+    }
+    if c.TmpDir != "" && !filepath.IsAbs(c.TmpDir) {
+        return errors.Errorf("temporary directory must be an absolute path - instead got %q", c.TmpDir)
+    }
+    if c.VolumePath != "" && !filepath.IsAbs(c.VolumePath) {
+        return errors.Errorf("volume path must be an absolute path - instead got %q", c.VolumePath)
+    }
+    return nil
+}
+
+func (c *ContainersConfig) validateDevices() error {
+    for _, d := range c.Devices {
+        _, _, _, err := Device(d)
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func (c *ContainersConfig) validateUlimits() error {
+    for _, u := range c.DefaultUlimits {
+        ul, err := units.ParseUlimit(u)
+        if err != nil {
+            return errors.Wrapf(err, "unrecognized ulimit %s", u)
+        }
+        _, err = ul.GetRlimit()
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func (c *ContainersConfig) validateTZ() error {
+    if c.TZ == "local" || c.TZ == "" {
+        return nil
+    }
+
+    lookupPaths := []string{
+        "/usr/share/zoneinfo",
+        "/etc/zoneinfo",
+    }
+
+    for _, path := range lookupPaths {
+        zonePath := filepath.Join(path, c.TZ)
+        if _, err := os.Stat(zonePath); err == nil {
+            // found zone information
+            return nil
+        }
+    }
+
+    return errors.Errorf(
+        "could not find timezone %s in paths: %s",
+        c.TZ, strings.Join(lookupPaths, ", "),
+    )
+}
+
+func (c *ContainersConfig) validateUmask() error {
+    validUmask := regexp.MustCompile(`^[0-7]{1,4}$`)
+    if !validUmask.MatchString(c.Umask) {
+        return errors.Errorf("not a valid umask %s", c.Umask)
+    }
+    return nil
+}
+
+func isRemote() bool {
+    return false
+}
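To illustrate the umask rule above with a package-internal sketch (illustrative only, not part of the vendored file): the `^[0-7]{1,4}$` pattern accepts one to four octal digits and nothing else:

    func umaskExamples() {
        ok := ContainersConfig{Umask: "0022"}
        _ = ok.validateUmask() // nil: up to four octal digits

        bad := ContainersConfig{Umask: "089"}
        _ = bad.validateUmask() // error: "8" and "9" are not octal digits
    }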
diff --git a/vendor/github.com/containers/common/pkg/config/config_remote.go b/vendor/github.com/containers/common/pkg/config/config_remote.go
new file mode 100644
index 00000000000..bff869efa96
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_remote.go
@@ -0,0 +1,34 @@
+//go:build remote
+// +build remote
+
+package config
+
+// isDirectory tests whether the given path exists and is a directory. It
+// follows symlinks.
+func isDirectory(path string) error {
+    return nil
+}
+
+func isRemote() bool {
+    return true
+}
+
+func (c *EngineConfig) validatePaths() error {
+    return nil
+}
+
+func (c *ContainersConfig) validateDevices() error {
+    return nil
+}
+
+func (c *ContainersConfig) validateUlimits() error {
+    return nil
+}
+
+func (c *ContainersConfig) validateTZ() error {
+    return nil
+}
+
+func (c *ContainersConfig) validateUmask() error {
+    return nil
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_unsupported.go b/vendor/github.com/containers/common/pkg/config/config_unsupported.go
new file mode 100644
index 00000000000..64e4fcfcdf5
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_unsupported.go
@@ -0,0 +1,8 @@
+//go:build !linux
+// +build !linux
+
+package config
+
+func selinuxEnabled() bool {
+    return false
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_windows.go b/vendor/github.com/containers/common/pkg/config/config_windows.go
new file mode 100644
index 00000000000..dbe7ba00d60
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_windows.go
@@ -0,0 +1,19 @@
+package config
+
+import "os"
+
+// podman remote clients on windows cannot use unshare.isRootless() to determine the configuration file locations.
+func customConfigFile() (string, error) {
+    if path, found := os.LookupEnv("CONTAINERS_CONF"); found {
+        return path, nil
+    }
+    return os.Getenv("APPDATA") + "\\containers\\containers.conf", nil
+}
+
+func ifRootlessConfigPath() (string, error) {
+    return os.Getenv("APPDATA") + "\\containers\\containers.conf", nil
+}
+
+var defaultHelperBinariesDir = []string{
+    "C:\\Program Files\\RedHat\\Podman",
+}
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
new file mode 100644
index 00000000000..a4e755a665b
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -0,0 +1,677 @@
+# The containers configuration file specifies all of the available configuration
+# command-line options/flags for container engine tools like Podman & Buildah,
+# but in a TOML format that can be easily modified and versioned.
+
+# Please refer to containers.conf(5) for details of all configuration options.
+# Not all container engines implement all of the options.
+# All of the options have hard coded defaults and these options will override
+# the built in defaults. Users can then override these options via the command
+# line. Container engines will read containers.conf files in up to three
+# locations in the following order:
+#  1. /usr/share/containers/containers.conf
+#  2. /etc/containers/containers.conf
+#  3. $HOME/.config/containers/containers.conf (Rootless containers ONLY)
+# Items specified in the latter containers.conf, if they exist, override the
+# previous containers.conf settings, or the default settings.

[containers]

+# List of annotations. Specified as
+# "key = value"
+# If it is empty or commented out, no annotations will be added
+#
+#annotations = []
+
+# Used to change the name of the default AppArmor profile of container engine.
+#
+#apparmor_profile = "container-default"
+
+# The hosts entries from the base hosts file are added to the containers hosts
+# file. This must be either an absolute path or one of the special values
+# "image", which uses the hosts file from the container image, or "none",
+# which means no base hosts file is used. The default is "" which will use
+# /etc/hosts.
+#
+#base_hosts_file = ""
+
+# Default way to create a cgroup namespace for the container
+# Options are:
+# `private` Create private Cgroup Namespace for the container.
+# `host`    Share host Cgroup Namespace with the container.
+#
+#cgroupns = "private"
+
+# Control container cgroup configuration
+# Determines whether the container will create cgroups.
+# Options are:
+# `enabled`   Enable cgroup support within container
+# `disabled`  Disable cgroup support, will inherit cgroups from parent
+# `no-conmon` Do not create a cgroup dedicated to conmon.
+#
+#cgroups = "enabled"
+
+# List of default capabilities for containers. If it is empty or commented out,
+# the default capabilities defined in the container engine will be added.
+#
+default_capabilities = [
+    "CHOWN",
+    "DAC_OVERRIDE",
+    "FOWNER",
+    "FSETID",
+    "KILL",
+    "NET_BIND_SERVICE",
+    "SETFCAP",
+    "SETGID",
+    "SETPCAP",
+    "SETUID",
+    "SYS_CHROOT"
+]
+
+# A list of sysctls to be set in containers by default,
+# specified as "name=value",
+# for example: "net.ipv4.ping_group_range=0 0".
+#
+default_sysctls = [
+    "net.ipv4.ping_group_range=0 0",
+]
+
+# A list of ulimits to be set in containers by default, specified as
+# "<ulimit name>=<soft limit>:<hard limit>", for example:
+# "nofile=1024:2048"
+# See setrlimit(2) for a list of resource names.
+# Any limit not specified here will be inherited from the process launching the
+# container engine.
+# Ulimits have limits for non-privileged container engines.
+#
+#default_ulimits = [
+#  "nofile=1280:2560",
+#]
+
+# List of devices. Specified as
+# "<device-on-host>:<device-on-container>:<permissions>", for example:
+# "/dev/sdc:/dev/xvdc:rwm".
+# If it is empty or commented out, only the default devices will be used
+#
+#devices = []
+
+# List of default DNS options to be added to /etc/resolv.conf inside of the container.
+#
+#dns_options = []
+
+# List of default DNS search domains to be added to /etc/resolv.conf inside of the container.
+#
+#dns_searches = []
+
+# Set default DNS servers.
+# This option can be used to override the DNS configuration passed to the
+# container. The special value "none" can be specified to disable creation of
+# /etc/resolv.conf in the container.
+# The /etc/resolv.conf file in the image will be used without changes.
+#
+#dns_servers = []
+
+# Environment variable list for the conmon process; used for passing necessary
+# environment variables to conmon or the runtime.
+#
+#env = [
+#  "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+#  "TERM=xterm",
+#]
+
+# Pass all host environment variables into the container.
+#
+#env_host = false
+
+# Set the ip for the host.containers.internal entry in the containers /etc/hosts
+# file. This can be set to "none" to disable adding this entry. By default it
+# will automatically choose the host ip.
+#
+# NOTE: When using podman machine this entry will never be added to the containers
+# hosts file; instead the gvproxy dns resolver will resolve this hostname. Therefore
+# it is not possible to disable the entry in this case.
+#
+#host_containers_internal_ip = ""
+
+# Default proxy environment variables passed into the container.
+# The environment variables passed in include:
+# http_proxy, https_proxy, ftp_proxy, no_proxy, and the upper case versions of
+# these. This option is needed when the host system uses a proxy but the
+# container should not. Proxy environment variables specified for the container
+# in any other way will override the values passed from the host.
+#
+#http_proxy = true
+
+# Run an init inside the container that forwards signals and reaps processes.
+#
+#init = false
+
+# Container init binary, if init=true, this is the init binary to be used for containers.
+#
+#init_path = "/usr/libexec/podman/catatonit"
+
+# Default way to create an IPC namespace (POSIX SysV IPC) for the container
+# Options are:
+# "host"      Share host IPC Namespace with the container.
+# "none"      Create shareable IPC Namespace for the container without a private /dev/shm.
+# "private"   Create private IPC Namespace for the container, other containers are not allowed to share it.
+# "shareable" Create shareable IPC Namespace for the container.
+#
+#ipcns = "shareable"
+
+# keyring tells the container engine whether to create
+# a kernel keyring for use within the container.
+#
+#keyring = true
+
+# label tells the container engine whether to use container separation using
+# MAC(SELinux) labeling or not.
+# The label flag is ignored on label disabled systems.
+#
+#label = true
+
+# Logging driver for the container. Available options: k8s-file and journald.
+#
+#log_driver = "k8s-file"
+
+# Maximum size allowed for the container log file. Negative numbers indicate
+# that no size limit is imposed. If positive, it must be >= 8192 to match or
+# exceed conmon's read buffer. The file is truncated and re-opened so the
+# limit is never exceeded.
+#
+#log_size_max = -1
+
+# Specifies default format tag for container log messages.
+# This is useful for creating a specific tag for container log messages.
+# Containers logs default to truncated container ID as a tag.
+#
+#log_tag = ""
+
+# Default way to create a Network namespace for the container
+# Options are:
+# `private` Create private Network Namespace for the container.
+# `host`    Share host Network Namespace with the container.
+# `none`    Containers do not use the network
+#
+#netns = "private"
+
+# Create /etc/hosts for the container. By default, container engines manage
+# /etc/hosts, automatically adding the container's own IP address.
+#
+#no_hosts = false
+
+# Default way to create a PID namespace for the container
+# Options are:
+# `private` Create private PID Namespace for the container.
+# `host`    Share host PID Namespace with the container.
+#
+#pidns = "private"
+
+# Maximum number of processes allowed in a container.
+#
+#pids_limit = 2048
+
+# Copy the content from the underlying image into the newly created volume
+# when the container is created instead of when it is started. If false,
+# the container engine will not copy the content until the container is started.
+# Setting it to true may have negative performance implications.
+#
+#prepare_volume_on_create = false
+
+# Path to the seccomp.json profile which is used as the default seccomp profile
+# for the runtime.
+#
+#seccomp_profile = "/usr/share/containers/seccomp.json"
+
+# Size of /dev/shm. Specified as <number><unit>.
+# Unit is optional, values:
+# b (bytes), k (kilobytes), m (megabytes), or g (gigabytes).
+# If the unit is omitted, the system uses bytes.
+#
+#shm_size = "65536k"
+
+# Set timezone in container. Takes IANA timezones as well as "local",
+# which sets the timezone in the container to match the host machine.
+#
+#tz = ""
+
+# Set umask inside the container
+#
+#umask = "0022"
+
+# Default way to create a User namespace for the container
+# Options are:
+# `auto` Create unique User Namespace for the container.
+# `host` Share host User Namespace with the container.
+#
+#userns = "host"
+
+# Number of UIDs to allocate for the automatic container creation.
+# UIDs are allocated from the "container" UIDs listed in
+# /etc/subuid & /etc/subgid
+#
+#userns_size = 65536
+
+# Default way to create a UTS namespace for the container
+# Options are:
+# `private` Create private UTS Namespace for the container.
+# `host`    Share host UTS Namespace with the container.
+#
+#utsns = "private"
+
+# List of volumes. Specified as
+# "<directory-on-host>:<directory-in-container>:<options>", for example:
+# "/db:/var/lib/db:ro".
+# If it is empty or commented out, no volumes will be added
+#
+#volumes = []
+
+[secrets]
+#driver = "file"
+
+[secrets.opts]
+#root = "/example/directory"
+
+[network]
+
+# Network backend determines what network driver will be used to set up and tear down container networks.
+# Valid values are "cni" and "netavark".
+# The default value is empty which means that it will automatically choose CNI or netavark. If there are
+# already containers/images or CNI networks present it will choose CNI.
+#
+# Before changing this value all containers must be stopped otherwise it is likely that
+# iptables rules and network interfaces might leak on the host. A reboot will fix this.
+#
+#network_backend = ""
+
+# Path to directory where CNI plugin binaries are located.
+#
+#cni_plugin_dirs = [
+#  "/usr/local/libexec/cni",
+#  "/usr/libexec/cni",
+#  "/usr/local/lib/cni",
+#  "/usr/lib/cni",
+#  "/opt/cni/bin",
+#]
+
+# The network name of the default network to attach pods to.
+#
+#default_network = "podman"
+
+# The default subnet for the default network given in default_network.
+# If a network with that name does not exist, a new network using that name and
+# this subnet will be created.
+# Must be a valid IPv4 CIDR prefix.
+#
+#default_subnet = "10.88.0.0/16"
+
+# DefaultSubnetPools is a list of subnets and size which are used to
+# allocate subnets automatically for podman network create.
+# It will iterate through the list and will pick the first free subnet
+# with the given size. This is only used for ipv4 subnets, ipv6 subnets
+# are always assigned randomly.
+#
+#default_subnet_pools = [
+#  {"base" = "10.89.0.0/16", "size" = 24},
+#  {"base" = "10.90.0.0/15", "size" = 24},
+#  {"base" = "10.92.0.0/14", "size" = 24},
+#  {"base" = "10.96.0.0/11", "size" = 24},
+#  {"base" = "10.128.0.0/9", "size" = 24},
+#]
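+#
+# For example, with the default pools above and size 24, the first
+# automatically allocated network would get 10.89.0.0/24, the next free one
+# 10.89.1.0/24, and so on through 10.89.255.0/24 before allocation moves on
+# to the 10.90.0.0/15 pool.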
+
+# Path to the directory where network configuration files are located.
+# For the CNI backend the default is "/etc/cni/net.d" as root
+# and "$HOME/.config/cni/net.d" as rootless.
+# For the netavark backend "/etc/containers/networks" is used as root
+# and "$graphroot/networks" as rootless.
+#
+#network_config_dir = "/etc/cni/net.d/"
+
+[engine]
+# Index to the active service
+#
+#active_service = production
+
+# The compression format to use when pushing an image.
+# Valid options are: `gzip`, `zstd` and `zstd:chunked`.
+#
+#compression_format = "gzip"
+
+
+# Cgroup management implementation used for the runtime.
+# Valid options "systemd" or "cgroupfs"
+#
+#cgroup_manager = "systemd"
+
+# Environment variables to pass into conmon
+#
+#conmon_env_vars = [
+#  "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+#]
+
+# Paths to look for the conmon container manager binary
+#
+#conmon_path = [
+#  "/usr/libexec/podman/conmon",
+#  "/usr/local/libexec/podman/conmon",
+#  "/usr/local/lib/podman/conmon",
+#  "/usr/bin/conmon",
+#  "/usr/sbin/conmon",
+#  "/usr/local/bin/conmon",
+#  "/usr/local/sbin/conmon"
+#]
+
+# Enforces using docker.io for completing short names in Podman's compatibility
+# REST API. Note that this will ignore unqualified-search-registries and
+# short-name aliases defined in containers-registries.conf(5).
+#compat_api_enforce_docker_hub = true
+
+# Specify the keys sequence used to detach a container.
+# Format is a single character [a-Z] or a comma separated sequence of
+# `ctrl-<value>`, where `<value>` is one of:
+# `a-z`, `@`, `^`, `[`, `\`, `]` or `_`
+#
+#detach_keys = "ctrl-p,ctrl-q"
+
+# Determines whether engine will reserve ports on the host when they are
+# forwarded to containers. When enabled, when ports are forwarded to containers,
+# ports are held open for as long as the container is running, ensuring that
+# they cannot be reused by other programs on the host. However, this can cause
+# significant memory usage if a container has many ports forwarded to it.
+# Disabling this can save memory.
+#
+#enable_port_reservation = true
+
+# Environment variables to be used when running the container engine (e.g., Podman, Buildah).
+# For example "http_proxy=internal.proxy.company.com".
+# Note these environment variables will not be used within the container.
+# Set the env section under [containers] table, if you want to set environment variables for the container.
+#
+#env = []
+
+# Define where event logs will be stored, when events_logger is "file".
+#events_logfile_path=""
+
+# Sets the maximum size for events_logfile_path.
+# The size can be b (bytes), k (kilobytes), m (megabytes), or g (gigabytes).
+# The format for the size is `<number><unit>`, e.g., `1b` or `3g`.
+# If no unit is included then the size will be read in bytes.
+# When the limit is exceeded, the logfile will be rotated and the old one will be deleted.
+# If the maximum size is set to 0, then no limit will be applied,
+# and the logfile will not be rotated.
+#events_logfile_max_size = "1m"
+
+# Selects which logging mechanism to use for container engine events.
+# Valid values are `journald`, `file` and `none`.
+#
+#events_logger = "journald"
+
+# A list of directories which are used to search for helper binaries.
+#
+#helper_binaries_dir = [
+#  "/usr/local/libexec/podman",
+#  "/usr/local/lib/podman",
+#  "/usr/libexec/podman",
+#  "/usr/lib/podman",
+#]
+
+# Path to OCI hooks directories for automatically executed hooks.
+#
+#hooks_dir = [
+#  "/usr/share/containers/oci/hooks.d",
+#]
+
+# Manifest Type (oci, v2s2, or v2s1) to use when pulling, pushing, building
+# container images. By default, images pulled and pushed match the format of the
+# source image. Building/committing defaults to OCI.
+#
+#image_default_format = ""
+
+# Default transport method for pulling and pushing images
+#
+#image_default_transport = "docker://"
+
+# Maximum number of image layers to be copied (pulled/pushed) simultaneously.
+# Not setting this field, or setting it to zero, will fall back to containers/image defaults.
+#
+#image_parallel_copies = 0
+
+# Default command to run the infra container
+#
+#infra_command = "/pause"
+
+# Infra (pause) container image name for pod infra containers. When running a
+# pod, we start a `pause` process in a container to hold open the namespaces
+# associated with the pod. This container does nothing other than sleep,
+# reserving the pod's resources for the lifetime of the pod. By default
+# container engines run a builtin container using the pause executable. If you
+# want to override it, specify an image to pull.
+#
+#infra_image = ""
+
+# Specify the locking mechanism to use; valid values are "shm" and "file".
+# Change the default only if you are sure of what you are doing, in general
+# "file" is useful only on platforms where cgo is not available for using the
+# faster "shm" lock type. You may need to run "podman system renumber" after
+# you change the lock type.
+#
+#lock_type = "shm"
+
+# MultiImageArchive - if true, the container engine allows for storing archives
+# (e.g., of the docker-archive transport) with multiple images. By default,
+# Podman creates single-image archives.
+#
+#multi_image_archive = false
+
+# Default engine namespace
+# If engine is joined to a namespace, it will see only containers and pods
+# that were created in the same namespace, and will create new containers and
+# pods in that namespace.
+# The default namespace is "", which corresponds to no namespace. When no
+# namespace is set, all containers and pods are visible.
+#
+#namespace = ""
+
+# Path to the slirp4netns binary
+#
+#network_cmd_path = ""
+
+# Default options to pass to the slirp4netns binary.
+# Valid options values are:
+#
+# - allow_host_loopback=true|false: Allow slirp4netns to reach the host loopback IP (`10.0.2.2`).
+#   Default is false.
+# - mtu=MTU: Specify the MTU to use for this network. (Default is `65520`).
+# - cidr=CIDR: Specify the IP range to use for this network. (Default is `10.0.2.0/24`).
+# - enable_ipv6=true|false: Enable IPv6. Default is true. (Required for `outbound_addr6`).
+# - outbound_addr=INTERFACE: Specify the outbound interface slirp should bind to (ipv4 traffic only).
+# - outbound_addr=IPv4: Specify the outbound ipv4 address slirp should bind to.
+# - outbound_addr6=INTERFACE: Specify the outbound interface slirp should bind to (ipv6 traffic only).
+# - outbound_addr6=IPv6: Specify the outbound ipv6 address slirp should bind to.
+# - port_handler=rootlesskit: Use rootlesskit for port forwarding. Default.
+#   Note: Rootlesskit changes the source IP address of incoming packets to an IP address in the container
+#   network namespace, usually `10.0.2.100`. If your application requires the real source IP address,
+#   e.g. web server logs, use the slirp4netns port handler. The rootlesskit port handler is also used for
+#   rootless containers when connected to user-defined networks.
+# - port_handler=slirp4netns: Use the slirp4netns port forwarding, it is slower than rootlesskit but
+#   preserves the correct source IP address. This port handler cannot be used for user-defined networks.
+#
+#network_cmd_options = []
+
+# Whether to use chroot instead of pivot_root in the runtime
+#
+#no_pivot_root = false
+
+# Number of locks available for containers and pods.
+# If this is changed, a lock renumber must be performed (e.g. with the
+# 'podman system renumber' command).
+#
+#num_locks = 2048
+
+# Set the exit policy of the pod when the last container exits.
+#pod_exit_policy = "continue"
+
+# Whether to pull a new image before running a container
+#
+#pull_policy = "missing"
+
+# Indicates whether the application should be running in remote mode. This flag modifies the
+# --remote option on container engines. Setting the flag to true will default
+# `podman --remote=true` for access to the remote Podman service.
+#
+#remote = false
+
+# Default OCI runtime
+#
+#runtime = "crun"
+
+# List of the OCI runtimes that support --format=json. When JSON is supported,
+# the engine will use it to report nicer errors.
+#
+#runtime_supports_json = ["crun", "runc", "kata", "runsc", "krun"]
+
+# List of the OCI runtimes that support running containers with KVM separation.
+#
+#runtime_supports_kvm = ["kata", "krun"]
+
+# List of the OCI runtimes that support running containers without cgroups.
+#
+#runtime_supports_nocgroups = ["crun", "krun"]
+
+# Default location for storing temporary container image content. Can be overridden with the TMPDIR environment
+# variable. If you specify "storage", then the location of the
+# container/storage tmp directory will be used.
+# image_copy_tmp_dir="/var/tmp"
+
+# Number of seconds to wait without a connection
+# before the `podman system service` times out and exits
+#
+#service_timeout = 5
+
+# Directory for persistent engine files (database, etc)
+# By default, this will be configured relative to where the containers/storage
+# stores containers
+# Uncomment to change location from this default
+#
+#static_dir = "/var/lib/containers/storage/libpod"
+
+# Number of seconds to wait for container to exit before sending kill signal.
+#
+#stop_timeout = 10
+
+# Number of seconds to wait for the exit command to be sent to the API process.
+# This mimics Docker's exec cleanup behaviour; the default is 5 minutes (the
+# value is in seconds).
+#
+#exit_command_delay = 300
+
+# map of service destinations
+#
+#[service_destinations]
+#  [service_destinations.production]
+#    URI to access the Podman service
+#    Examples:
+#      rootless "unix://run/user/$UID/podman/podman.sock" (Default)
+#      rootful "unix://run/podman/podman.sock" (Default)
+#      remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock
+#      remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock
+#
+#    uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock"
+#    Path to file containing ssh identity key
+#    identity = "~/.ssh/id_rsa"
+
+# Directory for temporary files. Must be tmpfs (wiped after reboot)
+#
+#tmp_dir = "/run/libpod"
+
+# Directory for libpod named volumes.
+# By default, this will be configured relative to where containers/storage
+# stores containers.
+# Uncomment to change location from this default.
+#
+#volume_path = "/var/lib/containers/storage/volumes"
+
+# Paths to look for a valid OCI runtime (crun, runc, kata, runsc, krun, etc)
+[engine.runtimes]
+#crun = [
+#  "/usr/bin/crun",
+#  "/usr/sbin/crun",
+#  "/usr/local/bin/crun",
+#  "/usr/local/sbin/crun",
+#  "/sbin/crun",
+#  "/bin/crun",
+#  "/run/current-system/sw/bin/crun",
+#]
+
+#kata = [
+#  "/usr/bin/kata-runtime",
+#  "/usr/sbin/kata-runtime",
+#  "/usr/local/bin/kata-runtime",
+#  "/usr/local/sbin/kata-runtime",
+#  "/sbin/kata-runtime",
+#  "/bin/kata-runtime",
+#  "/usr/bin/kata-qemu",
+#  "/usr/bin/kata-fc",
+#]
+
+#runc = [
+#  "/usr/bin/runc",
+#  "/usr/sbin/runc",
+#  "/usr/local/bin/runc",
+#  "/usr/local/sbin/runc",
+#  "/sbin/runc",
+#  "/bin/runc",
+#  "/usr/lib/cri-o-runc/sbin/runc",
+#]
+
+#runsc = [
+#  "/usr/bin/runsc",
+#  "/usr/sbin/runsc",
+#  "/usr/local/bin/runsc",
+#  "/usr/local/sbin/runsc",
+#  "/bin/runsc",
+#  "/sbin/runsc",
+#  "/run/current-system/sw/bin/runsc",
+#]
+
+#krun = [
+#  "/usr/bin/krun",
+#  "/usr/local/bin/krun",
+#]
+
+[engine.volume_plugins]
+#testplugin = "/run/podman/plugins/test.sock"
+
+[machine]
+# Number of CPUs a machine is created with.
+#
+#cpus=1
+
+# The size of the disk in GB created when init-ing a podman-machine VM.
+#
+#disk_size=10
+
+# The image used when creating a podman-machine VM.
+#
+#image = "testing"
+
+# Memory in MB a machine is created with.
+#
+#memory=2048
+
+# The username to use and create on the podman machine OS for rootless
+# container access.
+#
+#user = "core"
+
+# Host directories to be mounted as volumes into the VM by default.
+# Environment variables like $HOME as well as complete paths are supported for
+# the source and destination. An optional third field `:ro` can be used to
+# tell the container engines to mount the volume readonly.
+#
+# volumes = [
+#  "$HOME:$HOME",
+#]
+
+# The [machine] table MUST be the last entry in this file.
+# (Unless another table is added)
+# TOML does not provide a way to end a table other than a further table being
+# defined, so every key hereafter will be part of [machine] and not the
+# main config.
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
new file mode 100644
index 00000000000..8979a406bc7
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -0,0 +1,627 @@
+package config
+
+import (
+    "bytes"
+    "fmt"
+    "net"
+    "os"
+    "os/exec"
+    "path/filepath"
+    "regexp"
+    "strconv"
+    "strings"
+
+    nettypes "github.com/containers/common/libnetwork/types"
+    "github.com/containers/common/pkg/apparmor"
+    "github.com/containers/common/pkg/cgroupv2"
+    "github.com/containers/common/pkg/util"
+    "github.com/containers/storage/pkg/homedir"
+    "github.com/containers/storage/pkg/unshare"
+    "github.com/containers/storage/types"
+    "github.com/opencontainers/selinux/go-selinux"
+    "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
+)
+
+const (
+    // _conmonMinMajorVersion is the major version required for conmon.
+    _conmonMinMajorVersion = 2
+
+    // _conmonMinMinorVersion is the minor version required for conmon.
+    _conmonMinMinorVersion = 0
+
+    // _conmonMinPatchVersion is the sub-minor version required for conmon.
+    _conmonMinPatchVersion = 1
+
+    // _conmonVersionFormatErr is used when the expected version format of conmon
+    // has changed.
+    _conmonVersionFormatErr = "conmon version changed format"
+
+    // _defaultGraphRoot points to the default path of the graph root.
+    _defaultGraphRoot = "/var/lib/containers/storage"
+
+    // _defaultTransport is a prefix that we apply to an image name to check
+    // docker hub first for the image.
+    _defaultTransport = "docker://"
+)
+
+var (
+    // DefaultInitPath is the default path to the container-init binary
+    DefaultInitPath = "/usr/libexec/podman/catatonit"
+    // DefaultInfraImage is the default image to use for infra containers
+    DefaultInfraImage = ""
+    // DefaultRootlessSHMLockPath is the default path for rootless SHM locks
+    DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
+    // DefaultDetachKeys is the default keys sequence for detaching a
+    // container
+    DefaultDetachKeys = "ctrl-p,ctrl-q"
+    // ErrConmonOutdated indicates the version of conmon found (whether via the configuration or $PATH)
+    // is out of date for the current podman version
+    ErrConmonOutdated = errors.New("outdated conmon version")
+    // ErrInvalidArg indicates that an invalid argument was passed
+    ErrInvalidArg = errors.New("invalid argument")
+    // DefaultHooksDirs defines the default hooks directory
+    DefaultHooksDirs = []string{"/usr/share/containers/oci/hooks.d"}
+    // DefaultCapabilities is the default for the default_capabilities option in the containers.conf file
+    DefaultCapabilities = []string{
+        "CAP_AUDIT_WRITE",
+        "CAP_CHOWN",
+        "CAP_DAC_OVERRIDE",
+        "CAP_FOWNER",
+        "CAP_FSETID",
+        "CAP_KILL",
+        "CAP_MKNOD",
+        "CAP_NET_BIND_SERVICE",
+        "CAP_NET_RAW",
+        "CAP_SETFCAP",
+        "CAP_SETGID",
+        "CAP_SETPCAP",
+        "CAP_SETUID",
+        "CAP_SYS_CHROOT",
+    }
+
+    // It may seem a bit unconventional, but it is necessary to do so
+    DefaultCNIPluginDirs = []string{
+        "/usr/local/libexec/cni",
+        "/usr/libexec/cni",
+        "/usr/local/lib/cni",
+        "/usr/lib/cni",
+        "/opt/cni/bin",
+    }
+    DefaultSubnetPools = []SubnetPool{
+        // 10.89.0.0/24-10.255.255.0/24
+        parseSubnetPool("10.89.0.0/16", 24),
+        parseSubnetPool("10.90.0.0/15", 24),
+        parseSubnetPool("10.92.0.0/14", 24),
+        parseSubnetPool("10.96.0.0/11", 24),
+        parseSubnetPool("10.128.0.0/9", 24),
+    }
+    // additionalHelperBinariesDir is an extra helper binaries directory that
+    // should be set during link-time, if different packagers put their
+    // helper binary in a different location
+    additionalHelperBinariesDir string
+)
+
+// nolint:unparam
+func parseSubnetPool(subnet string, size int) SubnetPool {
+    _, n, _ := net.ParseCIDR(subnet)
+    return SubnetPool{
+        Base: &nettypes.IPNet{IPNet: *n},
+        Size: size,
+    }
+}
+
+const (
+    // _etcDir is the sysconfdir where podman should look for system config files.
+    // It can be overridden at build time.
+    _etcDir = "/etc"
+    // _installPrefix is the prefix where podman will be installed.
+    // It can be overridden at build time.
+    _installPrefix = "/usr"
+    // CgroupfsCgroupsManager represents cgroupfs native cgroup manager
+    CgroupfsCgroupsManager = "cgroupfs"
+    // DefaultApparmorProfile specifies the default apparmor profile for the container.
+    DefaultApparmorProfile = apparmor.Profile
+    // DefaultHostsFile is the default path to the hosts file
+    DefaultHostsFile = "/etc/hosts"
+    // SystemdCgroupsManager represents systemd native cgroup manager
+    SystemdCgroupsManager = "systemd"
+    // DefaultLogSizeMax is the default value for the maximum log size
+    // allowed for a container. Negative values mean that no limit is imposed.
+    DefaultLogSizeMax = -1
+    // DefaultEventsLogSizeMax is the default value for the maximum events log size
+    // before rotation.
+ DefaultEventsLogSizeMax = uint64(1000000) + // DefaultPidsLimit is the default value for maximum number of processes + // allowed inside a container + DefaultPidsLimit = 2048 + // DefaultPullPolicy pulls the image if it does not exist locally + DefaultPullPolicy = "missing" + // DefaultSignaturePolicyPath is the default value for the + // policy.json file. + DefaultSignaturePolicyPath = "/etc/containers/policy.json" + // DefaultSubnet is the subnet that will be used for the default + // network. + DefaultSubnet = "10.88.0.0/16" + // DefaultRootlessSignaturePolicyPath is the location within + // XDG_CONFIG_HOME of the rootless policy.json file. + DefaultRootlessSignaturePolicyPath = "containers/policy.json" + // DefaultShmSize default value + DefaultShmSize = "65536k" + // DefaultUserNSSize default value + DefaultUserNSSize = 65536 + // OCIBufSize limits maximum LogSizeMax + OCIBufSize = 8192 + // SeccompOverridePath if this exists it overrides the default seccomp path. + SeccompOverridePath = _etcDir + "/containers/seccomp.json" + // SeccompDefaultPath defines the default seccomp path. + SeccompDefaultPath = _installPrefix + "/share/containers/seccomp.json" +) + +// DefaultConfig defines the default values from containers.conf +func DefaultConfig() (*Config, error) { + defaultEngineConfig, err := defaultConfigFromMemory() + if err != nil { + return nil, err + } + + defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath + if unshare.IsRootless() { + configHome, err := homedir.GetConfigHome() + if err != nil { + return nil, err + } + sigPath := filepath.Join(configHome, DefaultRootlessSignaturePolicyPath) + defaultEngineConfig.SignaturePolicyPath = sigPath + if _, err := os.Stat(sigPath); err != nil { + if _, err := os.Stat(DefaultSignaturePolicyPath); err == nil { + defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath + } + } + } + + cgroupNS := "host" + if cgroup2, _ := cgroupv2.Enabled(); cgroup2 { + cgroupNS = "private" + } + + return &Config{ + Containers: ContainersConfig{ + Devices: []string{}, + Volumes: []string{}, + Annotations: []string{}, + ApparmorProfile: DefaultApparmorProfile, + BaseHostsFile: "", + CgroupNS: cgroupNS, + Cgroups: "enabled", + DefaultCapabilities: DefaultCapabilities, + DefaultSysctls: []string{}, + DefaultUlimits: getDefaultProcessLimits(), + DNSServers: []string{}, + DNSOptions: []string{}, + DNSSearches: []string{}, + EnableKeyring: true, + EnableLabeling: selinuxEnabled(), + Env: []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "TERM=xterm", + }, + EnvHost: false, + HTTPProxy: true, + Init: false, + InitPath: "", + IPCNS: "shareable", + LogDriver: defaultLogDriver(), + LogSizeMax: DefaultLogSizeMax, + NetNS: "private", + NoHosts: false, + PidsLimit: DefaultPidsLimit, + PidNS: "private", + ShmSize: DefaultShmSize, + TZ: "", + Umask: "0022", + UTSNS: "private", + UserNSSize: DefaultUserNSSize, + }, + Network: NetworkConfig{ + DefaultNetwork: "podman", + DefaultSubnet: DefaultSubnet, + DefaultSubnetPools: DefaultSubnetPools, + CNIPluginDirs: DefaultCNIPluginDirs, + }, + Engine: *defaultEngineConfig, + Secrets: defaultSecretConfig(), + Machine: defaultMachineConfig(), + }, nil +} + +// defaultSecretConfig returns the default secret configuration. +// Please note that the default is choosing the "file" driver. +func defaultSecretConfig() SecretConfig { + return SecretConfig{ + Driver: "file", + } +} + +// defaultMachineConfig returns the default machine configuration. 
+func defaultMachineConfig() MachineConfig { + return MachineConfig{ + CPUs: 1, + DiskSize: 100, + Image: getDefaultMachineImage(), + Memory: 2048, + User: getDefaultMachineUser(), + Volumes: []string{"$HOME:$HOME"}, + } +} + +// defaultConfigFromMemory returns a default engine configuration. Note that the +// config is different for root and rootless. It also parses the storage.conf. +func defaultConfigFromMemory() (*EngineConfig, error) { + c := new(EngineConfig) + tmp, err := defaultTmpDir() + if err != nil { + return nil, err + } + c.TmpDir = tmp + + c.EventsLogFilePath = filepath.Join(c.TmpDir, "events", "events.log") + + c.EventsLogFileMaxSize = eventsLogMaxSize(DefaultEventsLogSizeMax) + + c.CompatAPIEnforceDockerHub = true + + if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok { + types.SetDefaultConfigFilePath(path) + } + storeOpts, err := types.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID()) + if err != nil { + return nil, err + } + + if storeOpts.GraphRoot == "" { + logrus.Warnf("Storage configuration is unset - using hardcoded default graph root %q", _defaultGraphRoot) + storeOpts.GraphRoot = _defaultGraphRoot + } + c.graphRoot = storeOpts.GraphRoot + c.ImageCopyTmpDir = getDefaultTmpDir() + c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod") + c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes") + + c.HelperBinariesDir = defaultHelperBinariesDir + if additionalHelperBinariesDir != "" { + c.HelperBinariesDir = append(c.HelperBinariesDir, additionalHelperBinariesDir) + } + c.HooksDir = DefaultHooksDirs + c.ImageDefaultTransport = _defaultTransport + c.StateType = BoltDBStateStore + + c.ImageBuildFormat = "oci" + + c.CgroupManager = defaultCgroupManager() + c.ServiceTimeout = uint(5) + c.StopTimeout = uint(10) + c.ExitCommandDelay = uint(5 * 60) + c.Remote = isRemote() + c.OCIRuntimes = map[string][]string{ + "crun": { + "/usr/bin/crun", + "/usr/sbin/crun", + "/usr/local/bin/crun", + "/usr/local/sbin/crun", + "/sbin/crun", + "/bin/crun", + "/run/current-system/sw/bin/crun", + }, + "runc": { + "/usr/bin/runc", + "/usr/sbin/runc", + "/usr/local/bin/runc", + "/usr/local/sbin/runc", + "/sbin/runc", + "/bin/runc", + "/usr/lib/cri-o-runc/sbin/runc", + "/run/current-system/sw/bin/runc", + }, + "kata": { + "/usr/bin/kata-runtime", + "/usr/sbin/kata-runtime", + "/usr/local/bin/kata-runtime", + "/usr/local/sbin/kata-runtime", + "/sbin/kata-runtime", + "/bin/kata-runtime", + "/usr/bin/kata-qemu", + "/usr/bin/kata-fc", + }, + "runsc": { + "/usr/bin/runsc", + "/usr/sbin/runsc", + "/usr/local/bin/runsc", + "/usr/local/sbin/runsc", + "/bin/runsc", + "/sbin/runsc", + "/run/current-system/sw/bin/runsc", + }, + "krun": { + "/usr/bin/krun", + "/usr/local/bin/krun", + }, + } + // Needs to be called after populating c.OCIRuntimes + c.OCIRuntime = c.findRuntime() + + c.ConmonEnvVars = []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + } + c.ConmonPath = []string{ + "/usr/libexec/podman/conmon", + "/usr/local/libexec/podman/conmon", + "/usr/local/lib/podman/conmon", + "/usr/bin/conmon", + "/usr/sbin/conmon", + "/usr/local/bin/conmon", + "/usr/local/sbin/conmon", + "/run/current-system/sw/bin/conmon", + } + c.PullPolicy = DefaultPullPolicy + c.RuntimeSupportsJSON = []string{ + "crun", + "runc", + "kata", + "runsc", + "krun", + } + c.RuntimeSupportsNoCgroups = []string{"crun", "krun"} + c.RuntimeSupportsKVM = []string{"kata", "kata-runtime", "kata-qemu", "kata-fc", "krun"} + c.InitPath = DefaultInitPath + c.NoPivotRoot = false + + 
c.InfraImage = DefaultInfraImage
+	c.EnablePortReservation = true
+	c.NumLocks = 2048
+	c.EventsLogger = defaultEventsLogger()
+	c.DetachKeys = DefaultDetachKeys
+	c.SDNotify = false
+	// TODO - ideally we should expose a `type LockType string` along with
+	// constants.
+	c.LockType = "shm"
+	c.MachineEnabled = false
+	c.ChownCopiedFiles = true
+
+	c.PodExitPolicy = defaultPodExitPolicy
+
+	return c, nil
+}
+
+func defaultTmpDir() (string, error) {
+	if !unshare.IsRootless() {
+		return "/run/libpod", nil
+	}
+
+	runtimeDir, err := util.GetRuntimeDir()
+	if err != nil {
+		return "", err
+	}
+	libpodRuntimeDir := filepath.Join(runtimeDir, "libpod")
+
+	if err := os.Mkdir(libpodRuntimeDir, 0o700|os.ModeSticky); err != nil {
+		if !os.IsExist(err) {
+			return "", err
+		} else if err := os.Chmod(libpodRuntimeDir, 0o700|os.ModeSticky); err != nil {
+			// The directory already exists; make sure the sticky bit is set on it.
+			return "", errors.Wrap(err, "set sticky bit on")
+		}
+	}
+	return filepath.Join(libpodRuntimeDir, "tmp"), nil
+}
+
+// probeConmon calls conmon --version and verifies it is a new enough version for
+// the runtime expectations the container engine currently has.
+func probeConmon(conmonBinary string) error {
+	cmd := exec.Command(conmonBinary, "--version")
+	var out bytes.Buffer
+	cmd.Stdout = &out
+	if err := cmd.Run(); err != nil {
+		return err
+	}
+	r := regexp.MustCompile(`^conmon version (?P<Major>\d+).(?P<Minor>\d+).(?P<Patch>\d+)`)
+
+	matches := r.FindStringSubmatch(out.String())
+	if len(matches) != 4 {
+		return errors.New(_conmonVersionFormatErr)
+	}
+	major, err := strconv.Atoi(matches[1])
+	if err != nil {
+		return errors.Wrap(err, _conmonVersionFormatErr)
+	}
+	if major < _conmonMinMajorVersion {
+		return ErrConmonOutdated
+	}
+	if major > _conmonMinMajorVersion {
+		return nil
+	}
+
+	minor, err := strconv.Atoi(matches[2])
+	if err != nil {
+		return errors.Wrap(err, _conmonVersionFormatErr)
+	}
+	if minor < _conmonMinMinorVersion {
+		return ErrConmonOutdated
+	}
+	if minor > _conmonMinMinorVersion {
+		return nil
+	}
+
+	patch, err := strconv.Atoi(matches[3])
+	if err != nil {
+		return errors.Wrap(err, _conmonVersionFormatErr)
+	}
+	if patch < _conmonMinPatchVersion {
+		return ErrConmonOutdated
+	}
+	if patch > _conmonMinPatchVersion {
+		return nil
+	}
+
+	return nil
+}
+
+// NetNS returns the default network namespace
+func (c *Config) NetNS() string {
+	return c.Containers.NetNS
+}
+
+// EventsLogMaxSize returns the maximum size of the events log.
+func (c EngineConfig) EventsLogMaxSize() uint64 {
+	return uint64(c.EventsLogFileMaxSize)
+}
+
+// SecurityOptions returns the default security options
+func (c *Config) SecurityOptions() []string {
+	securityOpts := []string{}
+	if c.Containers.SeccompProfile != "" && c.Containers.SeccompProfile != SeccompDefaultPath {
+		securityOpts = append(securityOpts, fmt.Sprintf("seccomp=%s", c.Containers.SeccompProfile))
+	}
+	if apparmor.IsEnabled() && c.Containers.ApparmorProfile != "" {
+		securityOpts = append(securityOpts, fmt.Sprintf("apparmor=%s", c.Containers.ApparmorProfile))
+	}
+	if selinux.GetEnabled() && !c.Containers.EnableLabeling {
+		securityOpts = append(securityOpts, fmt.Sprintf("label=%s", selinux.DisableSecOpt()[0]))
+	}
+	return securityOpts
+}
+
+// Sysctls returns the default sysctls
+func (c *Config) Sysctls() []string {
+	return c.Containers.DefaultSysctls
+}
+
+// Volumes returns the default additional volumes for containers
+func (c *Config) Volumes() []string {
+	return c.Containers.Volumes
+}
+
+// Devices returns the default additional devices for containers
+func (c *Config) Devices()
[]string {
+	return c.Containers.Devices
+}
+
+// DNSServers returns the default DNS servers to add to resolv.conf in containers
+func (c *Config) DNSServers() []string {
+	return c.Containers.DNSServers
+}
+
+// DNSSearches returns the default DNS searches to add to resolv.conf in containers
+func (c *Config) DNSSearches() []string {
+	return c.Containers.DNSSearches
+}
+
+// DNSOptions returns the default DNS options to add to resolv.conf in containers
+func (c *Config) DNSOptions() []string {
+	return c.Containers.DNSOptions
+}
+
+// Env returns the default additional environment variables to add to containers
+func (c *Config) Env() []string {
+	return c.Containers.Env
+}
+
+// InitPath returns the default init path to add to containers
+func (c *Config) InitPath() string {
+	return c.Containers.InitPath
+}
+
+// IPCNS returns the default IPC Namespace configuration to run containers with
+func (c *Config) IPCNS() string {
+	return c.Containers.IPCNS
+}
+
+// PidNS returns the default PID Namespace configuration to run containers with
+func (c *Config) PidNS() string {
+	return c.Containers.PidNS
+}
+
+// CgroupNS returns the default Cgroup Namespace configuration to run containers with
+func (c *Config) CgroupNS() string {
+	return c.Containers.CgroupNS
+}
+
+// Cgroups returns whether to run containers with cgroup confinement
+func (c *Config) Cgroups() string {
+	return c.Containers.Cgroups
+}
+
+// UTSNS returns the default UTS Namespace configuration to run containers with
+func (c *Config) UTSNS() string {
+	return c.Containers.UTSNS
+}
+
+// ShmSize returns the default size for temporary file systems to use in containers
+func (c *Config) ShmSize() string {
+	return c.Containers.ShmSize
+}
+
+// Ulimits returns the default ulimits to use in containers
+func (c *Config) Ulimits() []string {
+	return c.Containers.DefaultUlimits
+}
+
+// PidsLimit returns the default maximum number of pids to use in containers
+func (c *Config) PidsLimit() int64 {
+	if unshare.IsRootless() {
+		if c.Engine.CgroupManager != SystemdCgroupsManager {
+			return 0
+		}
+		cgroup2, _ := cgroupv2.Enabled()
+		if !cgroup2 {
+			return 0
+		}
+	}
+
+	return c.Containers.PidsLimit
+}
+
+// DetachKeys returns the default detach keys to detach from a container
+func (c *Config) DetachKeys() string {
+	return c.Engine.DetachKeys
+}
+
+// TZ returns the timezone in the container
+func (c *Config) TZ() string {
+	return c.Containers.TZ
+}
+
+// Umask returns the default umask to use in containers
+func (c *Config) Umask() string {
+	return c.Containers.Umask
+}
+
+// LogDriver returns the logging driver to be used
+// currently k8s-file or journald
+func (c *Config) LogDriver() string {
+	return c.Containers.LogDriver
+}
+
+// MachineEnabled returns whether podman is running inside a VM or not
+func (c *Config) MachineEnabled() bool {
+	return c.Engine.MachineEnabled
+}
+
+// MachineVolumes returns volumes to mount into the VM
+func (c *Config) MachineVolumes() ([]string, error) {
+	return machineVolumes(c.Machine.Volumes)
+}
+
+func machineVolumes(volumes []string) ([]string, error) {
+	translatedVolumes := []string{}
+	for _, v := range volumes {
+		vol := os.ExpandEnv(v)
+		split := strings.Split(vol, ":")
+		if len(split) < 2 || len(split) > 3 {
+			return nil, errors.Errorf("invalid machine volume %s, 2 or 3 fields required", v)
+		}
+		if split[0] == "" || split[1] == "" {
+			return nil, errors.Errorf("invalid machine volume %s, fields must contain data", v)
+		}
+		translatedVolumes = append(translatedVolumes, vol)
+	}
+	return translatedVolumes, nil
+}
diff --git
a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go new file mode 100644 index 00000000000..d6ea4359cc3 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/default_linux.go @@ -0,0 +1,60 @@ +package config + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +const ( + oldMaxSize = uint64(1048576) +) + +// getDefaultMachineImage returns the default machine image stream +// On Linux/Mac, this returns the FCOS stream +func getDefaultMachineImage() string { + return "testing" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "core" +} + +// getDefaultProcessLimits returns the nproc for the current process in ulimits format +// Note that nfile sometimes cannot be set to unlimited, and the limit is hardcoded +// to (oldMaxSize) 1048576 (2^20), see: http://stackoverflow.com/a/1213069/1811501 +// In rootless containers this will fail, and the process will just use its current limits +func getDefaultProcessLimits() []string { + rlim := unix.Rlimit{Cur: oldMaxSize, Max: oldMaxSize} + oldrlim := rlim + // Attempt to set file limit and process limit to pid_max in OS + dat, err := ioutil.ReadFile("/proc/sys/kernel/pid_max") + if err == nil { + val := strings.TrimSuffix(string(dat), "\n") + max, err := strconv.ParseUint(val, 10, 64) + if err == nil { + rlim = unix.Rlimit{Cur: uint64(max), Max: uint64(max)} + } + } + defaultLimits := []string{} + if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil { + defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max)) + } else if err := unix.Setrlimit(unix.RLIMIT_NPROC, &oldrlim); err == nil { + defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", oldrlim.Cur, oldrlim.Max)) + } + return defaultLimits +} + +// getDefaultTmpDir for linux +func getDefaultTmpDir() string { + // first check the TMPDIR env var + if path, found := os.LookupEnv("TMPDIR"); found { + return path + } + return "/var/tmp" +} diff --git a/vendor/github.com/containers/common/pkg/config/default_unsupported.go b/vendor/github.com/containers/common/pkg/config/default_unsupported.go new file mode 100644 index 00000000000..4be8267558d --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/default_unsupported.go @@ -0,0 +1,36 @@ +//go:build !linux && !windows +// +build !linux,!windows + +package config + +import "os" + +// getDefaultMachineImage returns the default machine image stream +// On Linux/Mac, this returns the FCOS stream +func getDefaultMachineImage() string { + return "testing" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "core" +} + +// isCgroup2UnifiedMode returns whether we are running in cgroup2 mode. 
+func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) {
+	return false, nil
+}
+
+// getDefaultProcessLimits returns the nofile and nproc for the current process in ulimits format
+func getDefaultProcessLimits() []string {
+	return []string{}
+}
+
+// getDefaultTmpDir for platforms other than Linux and Windows
+func getDefaultTmpDir() string {
+	// first check the TMPDIR env var
+	if path, found := os.LookupEnv("TMPDIR"); found {
+		return path
+	}
+	return "/var/tmp"
+}
diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go b/vendor/github.com/containers/common/pkg/config/default_windows.go
new file mode 100644
index 00000000000..db230dfb286
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/default_windows.go
@@ -0,0 +1,34 @@
+package config
+
+import "os"
+
+// getDefaultMachineImage returns the default machine image stream
+// On Windows this refers to the Fedora major release number
+func getDefaultMachineImage() string {
+	return "35"
+}
+
+// getDefaultMachineUser returns the user to use for rootless podman
+func getDefaultMachineUser() string {
+	return "user"
+}
+
+// isCgroup2UnifiedMode returns whether we are running in cgroup2 mode.
+func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) {
+	return false, nil
+}
+
+// getDefaultProcessLimits returns the nofile and nproc for the current process in ulimits format
+func getDefaultProcessLimits() []string {
+	return []string{}
+}
+
+// getDefaultTmpDir for windows
+func getDefaultTmpDir() string {
+	// first check the Temp env var
+	// https://answers.microsoft.com/en-us/windows/forum/all/where-is-the-temporary-folder/44a039a5-45ba-48dd-84db-fd700e54fd56
+	if val, ok := os.LookupEnv("TEMP"); ok {
+		return val
+	}
+	return os.Getenv("LOCALAPPDATA") + "\\Temp"
+}
diff --git a/vendor/github.com/containers/common/pkg/config/nosystemd.go b/vendor/github.com/containers/common/pkg/config/nosystemd.go
new file mode 100644
index 00000000000..352fddf92cc
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/nosystemd.go
@@ -0,0 +1,29 @@
+//go:build !systemd || !cgo
+// +build !systemd !cgo
+
+package config
+
+const (
+	// DefaultLogDriver is the default type of log files
+	DefaultLogDriver = "k8s-file"
+)
+
+func defaultCgroupManager() string {
+	return CgroupfsCgroupsManager
+}
+
+func defaultEventsLogger() string {
+	return "file"
+}
+
+func defaultLogDriver() string {
+	return DefaultLogDriver
+}
+
+func useSystemd() bool {
+	return false
+}
+
+func useJournald() bool {
+	return false
+}
diff --git a/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go b/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go
new file mode 100644
index 00000000000..f0f983077dd
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go
@@ -0,0 +1,36 @@
+package config
+
+import "fmt"
+
+// PodExitPolicies includes the supported pod exit policies.
+var PodExitPolicies = []string{string(PodExitPolicyContinue), string(PodExitPolicyStop)}
+
+// PodExitPolicy determines a pod's exit and stop behaviour.
+type PodExitPolicy string
+
+const (
+	// PodExitPolicyContinue instructs the pod to continue running when the
+	// last container has exited.
+	PodExitPolicyContinue PodExitPolicy = "continue"
+	// PodExitPolicyStop instructs the pod to stop when the last container
+	// has exited.
+	PodExitPolicyStop = "stop"
+	// PodExitPolicyUnsupported implies an internal error.
+	// Kept as a distinct sentinel value for backwards compat.
+	PodExitPolicyUnsupported = "invalid"
+
+	defaultPodExitPolicy = PodExitPolicyContinue
+)
+
+// ParsePodExitPolicy parses the specified policy and returns an error if it is
+// invalid.
+func ParsePodExitPolicy(policy string) (PodExitPolicy, error) {
+	switch policy {
+	case "", string(PodExitPolicyContinue):
+		return PodExitPolicyContinue, nil
+	case string(PodExitPolicyStop):
+		return PodExitPolicyStop, nil
+	default:
+		return PodExitPolicyUnsupported, fmt.Errorf("invalid pod exit policy: %q", policy)
+	}
+}
diff --git a/vendor/github.com/containers/common/pkg/config/pull_policy.go b/vendor/github.com/containers/common/pkg/config/pull_policy.go
new file mode 100644
index 00000000000..8c1f0ec2907
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/pull_policy.go
@@ -0,0 +1,95 @@
+package config
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+)
+
+// PullPolicy determines how and which images are being pulled from a container
+// registry (i.e., docker transport only).
+//
+// Supported string values are:
+// * "always" <-> PullPolicyAlways
+// * "missing" <-> PullPolicyMissing
+// * "newer" <-> PullPolicyNewer
+// * "never" <-> PullPolicyNever
+type PullPolicy int
+
+const (
+	// Always pull the image.
+	PullPolicyAlways PullPolicy = iota
+	// Pull the image only if it could not be found in the local containers
+	// storage.
+	PullPolicyMissing
+	// Never pull the image but use the one from the local containers
+	// storage.
+	PullPolicyNever
+	// Pull if the image on the registry is newer than the one in the local
+	// containers storage. An image is considered to be newer when the
+	// digests are different. Comparing the time stamps is prone to
+	// errors.
+	PullPolicyNewer
+
+	// Ideally this should be the first `iota` but backwards compatibility
+	// prevents us from changing the values.
+	PullPolicyUnsupported = -1
+)
+
+// String converts a PullPolicy into a string.
+//
+// Supported string values are:
+// * "always" <-> PullPolicyAlways
+// * "missing" <-> PullPolicyMissing
+// * "newer" <-> PullPolicyNewer
+// * "never" <-> PullPolicyNever
+func (p PullPolicy) String() string {
+	switch p {
+	case PullPolicyAlways:
+		return "always"
+	case PullPolicyMissing:
+		return "missing"
+	case PullPolicyNewer:
+		return "newer"
+	case PullPolicyNever:
+		return "never"
+	}
+	return fmt.Sprintf("unrecognized policy %d", p)
+}
+
+// Validate returns an error if the pull policy is not supported.
+func (p PullPolicy) Validate() error {
+	switch p {
+	case PullPolicyAlways, PullPolicyMissing, PullPolicyNewer, PullPolicyNever:
+		return nil
+	default:
+		return errors.Errorf("unsupported pull policy %d", p)
+	}
+}
+
+// ParsePullPolicy parses the string into a pull policy.
+//
+// Supported string values are:
+// * "always" <-> PullPolicyAlways
+// * "missing" <-> PullPolicyMissing (also "ifnotpresent" and "")
+// * "newer" <-> PullPolicyNewer (also "ifnewer")
+// * "never" <-> PullPolicyNever
+func ParsePullPolicy(s string) (PullPolicy, error) {
+	switch s {
+	case "always", "Always":
+		return PullPolicyAlways, nil
+	case "missing", "Missing", "ifnotpresent", "IfNotPresent", "":
+		return PullPolicyMissing, nil
+	case "newer", "Newer", "ifnewer", "IfNewer":
+		return PullPolicyNewer, nil
+	case "never", "Never":
+		return PullPolicyNever, nil
+	default:
+		return PullPolicyUnsupported, errors.Errorf("unsupported pull policy %q", s)
+	}
+}
+
+// Deprecated: please use `ParsePullPolicy` instead.
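+//
+// A minimal sketch of the preferred call (values as documented above):
+//
+//	policy, err := ParsePullPolicy("ifnewer")
+//	// err == nil, policy == PullPolicyNewer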
+func ValidatePullPolicy(s string) (PullPolicy, error) { + return ParsePullPolicy(s) +} diff --git a/vendor/github.com/containers/common/pkg/config/systemd.go b/vendor/github.com/containers/common/pkg/config/systemd.go new file mode 100644 index 00000000000..03d19a12f30 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/systemd.go @@ -0,0 +1,86 @@ +//go:build systemd && cgo +// +build systemd,cgo + +package config + +import ( + "io/ioutil" + "path/filepath" + "strings" + "sync" + + "github.com/containers/common/pkg/cgroupv2" + "github.com/containers/storage/pkg/unshare" +) + +var ( + systemdOnce sync.Once + usesSystemd bool + journaldOnce sync.Once + usesJournald bool +) + +const ( + // DefaultLogDriver is the default type of log files + DefaultLogDriver = "journald" +) + +func defaultCgroupManager() string { + if !useSystemd() { + return CgroupfsCgroupsManager + } + enabled, err := cgroupv2.Enabled() + if err == nil && !enabled && unshare.IsRootless() { + return CgroupfsCgroupsManager + } + + return SystemdCgroupsManager +} + +func defaultEventsLogger() string { + if useJournald() { + return "journald" + } + return "file" +} + +func defaultLogDriver() string { + if useJournald() { + return "journald" + } + return "k8s-file" +} + +func useSystemd() bool { + systemdOnce.Do(func() { + dat, err := ioutil.ReadFile("/proc/1/comm") + if err == nil { + val := strings.TrimSuffix(string(dat), "\n") + usesSystemd = (val == "systemd") + } + }) + return usesSystemd +} + +func useJournald() bool { + journaldOnce.Do(func() { + if !useSystemd() { + return + } + for _, root := range []string{"/run/log/journal", "/var/log/journal"} { + dirs, err := ioutil.ReadDir(root) + if err != nil { + continue + } + for _, d := range dirs { + if d.IsDir() { + if _, err := ioutil.ReadDir(filepath.Join(root, d.Name())); err == nil { + usesJournald = true + return + } + } + } + } + }) + return usesJournald +} diff --git a/vendor/github.com/containers/common/pkg/download/download.go b/vendor/github.com/containers/common/pkg/download/download.go new file mode 100644 index 00000000000..abf4c87739e --- /dev/null +++ b/vendor/github.com/containers/common/pkg/download/download.go @@ -0,0 +1,31 @@ +package download + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" +) + +// FromURL downloads the specified source to a file in tmpdir (OS defaults if +// empty). 
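+//
+// A minimal usage sketch (the URL and cleanup are illustrative only):
+//
+//	path, err := FromURL("", "https://example.com/disk.img")
+//	if err != nil {
+//		return err
+//	}
+//	defer os.Remove(path) // the caller owns the temporary file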
+func FromURL(tmpdir, source string) (string, error) {
+	tmp, err := ioutil.TempFile(tmpdir, "")
+	if err != nil {
+		return "", fmt.Errorf("creating temporary download file: %w", err)
+	}
+	defer tmp.Close()
+
+	response, err := http.Get(source) // nolint:noctx
+	if err != nil {
+		return "", fmt.Errorf("downloading %s: %w", source, err)
+	}
+	defer response.Body.Close()
+
+	_, err = io.Copy(tmp, response.Body)
+	if err != nil {
+		return "", fmt.Errorf("copying %s to %s: %w", source, tmp.Name(), err)
+	}
+
+	return tmp.Name(), nil
+}
diff --git a/vendor/github.com/containers/common/pkg/filters/filters.go b/vendor/github.com/containers/common/pkg/filters/filters.go
new file mode 100644
index 00000000000..e26e056adf9
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/filters/filters.go
@@ -0,0 +1,118 @@
+package filters
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/containers/common/pkg/timetype"
+	"github.com/pkg/errors"
+)
+
+// ComputeUntilTimestamp extracts the until timestamp from filters
+func ComputeUntilTimestamp(filterValues []string) (time.Time, error) {
+	invalid := time.Time{}
+	if len(filterValues) != 1 {
+		return invalid, errors.Errorf("specify exactly one timestamp for until")
+	}
+	ts, err := timetype.GetTimestamp(filterValues[0], time.Now())
+	if err != nil {
+		return invalid, err
+	}
+	seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0)
+	if err != nil {
+		return invalid, err
+	}
+	return time.Unix(seconds, nanoseconds), nil
+}
+
+// FiltersFromRequest extracts the "filters" parameter from the specified
+// http.Request. The parameter can either be a `map[string][]string` as done
+// in new versions of Docker and libpod, or a `map[string]map[string]bool` as
+// done in older versions of Docker. We have to do a bit of Yoga to support
+// both - just as Docker does as well.
+//
+// Please refer to https://github.com/containers/podman/issues/6899 for some
+// background.
+func FiltersFromRequest(r *http.Request) ([]string, error) {
+	var (
+		compatFilters map[string]map[string]bool
+		filters       map[string][]string
+		libpodFilters []string
+		raw           []byte
+	)
+
+	if _, found := r.URL.Query()["filters"]; found {
+		raw = []byte(r.Form.Get("filters"))
+	} else if _, found := r.URL.Query()["Filters"]; found {
+		raw = []byte(r.Form.Get("Filters"))
+	} else {
+		return []string{}, nil
+	}
+
+	// Backwards compat with older versions of Docker.
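+	// For example (illustrative values): the older form
+	// {"label":{"foo=bar":true}} and the newer form {"label":["foo=bar"]}
+	// both decode to the flat libpod filter "label=foo=bar" below.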
+	if err := json.Unmarshal(raw, &compatFilters); err == nil {
+		for filterKey, filterMap := range compatFilters {
+			for filterValue, toAdd := range filterMap {
+				if toAdd {
+					libpodFilters = append(libpodFilters, fmt.Sprintf("%s=%s", filterKey, filterValue))
+				}
+			}
+		}
+		return libpodFilters, nil
+	}
+
+	if err := json.Unmarshal(raw, &filters); err != nil {
+		return nil, err
+	}
+
+	for filterKey, filterSlice := range filters {
+		f := filterKey
+		for _, filterValue := range filterSlice {
+			f += "=" + filterValue
+		}
+		libpodFilters = append(libpodFilters, f)
+	}
+
+	return libpodFilters, nil
+}
+
+// PrepareFilters prepares a *map[string][]string of filters to be later searched
+// in libpod and compat API to get desired filters
+func PrepareFilters(r *http.Request) (map[string][]string, error) {
+	filtersList, err := FiltersFromRequest(r)
+	if err != nil {
+		return nil, err
+	}
+	filterMap := map[string][]string{}
+	for _, filter := range filtersList {
+		split := strings.SplitN(filter, "=", 2)
+		if len(split) > 1 {
+			filterMap[split[0]] = append(filterMap[split[0]], split[1])
+		}
+	}
+	return filterMap, nil
+}
+
+// MatchLabelFilters returns true if all of the given filter values are
+// satisfied by the labels map
+func MatchLabelFilters(filterValues []string, labels map[string]string) bool {
+outer:
+	for _, filterValue := range filterValues {
+		filterArray := strings.SplitN(filterValue, "=", 2)
+		filterKey := filterArray[0]
+		if len(filterArray) > 1 {
+			filterValue = filterArray[1]
+		} else {
+			filterValue = ""
+		}
+		for labelKey, labelValue := range labels {
+			if labelKey == filterKey && (filterValue == "" || labelValue == filterValue) {
+				continue outer
+			}
+		}
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/containers/common/pkg/flag/flag.go b/vendor/github.com/containers/common/pkg/flag/flag.go
new file mode 100644
index 00000000000..7d6b6a5343d
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/flag/flag.go
@@ -0,0 +1,174 @@
+package flag
+
+import (
+	"strconv"
+
+	"github.com/spf13/pflag"
+)
+
+// OptionalBool is a boolean with a separate presence flag and value.
+type OptionalBool struct {
+	present bool
+	value   bool
+}
+
+// Present returns the bool's presence flag.
+func (ob *OptionalBool) Present() bool {
+	return ob.present
+}
+
+// Value returns the bool's value. Should only be used if Present() is true.
+func (ob *OptionalBool) Value() bool {
+	return ob.value
+}
+
+// optionalBoolValue is a cli.Generic == flag.Value implementation equivalent to
+// the one underlying flag.Bool, except that it records whether the flag has been set.
+// This is distinct from OptionalBool to (pretend to) force callers to use
+// OptionalBoolFlag
+type optionalBoolValue OptionalBool
+
+// OptionalBoolFlag creates a new flag for an optional bool in the specified
+// flag set, with the specified name and usage.
+func OptionalBoolFlag(fs *pflag.FlagSet, p *OptionalBool, name, usage string) *pflag.Flag {
+	flag := fs.VarPF(internalNewOptionalBoolValue(p), name, "", usage)
+	flag.NoOptDefVal = "true"
+	flag.DefValue = "false"
+	return flag
+}
+
+// WARNING: Do not directly use this method to define an optionalBool flag.
+// Callers should use OptionalBoolFlag
+func internalNewOptionalBoolValue(p *OptionalBool) pflag.Value {
+	p.present = false
+	return (*optionalBoolValue)(p)
+}
+
+// Set parses the string to a bool and sets it.
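+//
+// A minimal sketch (fs and the flag name are illustrative):
+//
+//	var b OptionalBool
+//	OptionalBoolFlag(fs, &b, "verify", "optionally verify")
+//	_ = fs.Parse([]string{"--verify=false"})
+//	// Set ran with "false": b.Present() == true, b.Value() == false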
+func (ob *optionalBoolValue) Set(s string) error {
+	v, err := strconv.ParseBool(s)
+	if err != nil {
+		return err
+	}
+	ob.value = v
+	ob.present = true
+	return nil
+}
+
+// String returns the string representation of the bool.
+func (ob *optionalBoolValue) String() string {
+	if !ob.present {
+		return "" // This is, sadly, not round-trip safe: --flag is interpreted as --flag=true
+	}
+	return strconv.FormatBool(ob.value)
+}
+
+// Type returns the type.
+func (ob *optionalBoolValue) Type() string {
+	return "bool"
+}
+
+// IsBoolFlag indicates that it's a bool flag.
+func (ob *optionalBoolValue) IsBoolFlag() bool {
+	return true
+}
+
+// OptionalString is a string with a separate presence flag.
+type OptionalString struct {
+	present bool
+	value   string
+}
+
+// Present returns the string's presence flag.
+func (os *OptionalString) Present() bool {
+	return os.present
+}
+
+// Value returns the string's value. Should only be used if Present() is true.
+func (os *OptionalString) Value() string {
+	return os.value
+}
+
+// optionalStringValue is a cli.Generic == flag.Value implementation equivalent to
+// the one underlying flag.String, except that it records whether the flag has been set.
+// This is distinct from OptionalString to (pretend to) force callers to use
+// NewOptionalStringValue
+type optionalStringValue OptionalString
+
+// NewOptionalStringValue returns a pflag.Value for the string.
+func NewOptionalStringValue(p *OptionalString) pflag.Value {
+	p.present = false
+	return (*optionalStringValue)(p)
+}
+
+// Set sets the string.
+func (ob *optionalStringValue) Set(s string) error {
+	ob.value = s
+	ob.present = true
+	return nil
+}
+
+// String returns the string if present.
+func (ob *optionalStringValue) String() string {
+	if !ob.present {
+		return "" // This is, sadly, not round-trip safe: --flag= is interpreted as {present:true, value:""}
+	}
+	return ob.value
+}
+
+// Type returns the string type.
+func (ob *optionalStringValue) Type() string {
+	return "string"
+}
+
+// OptionalInt is an int with a separate presence flag.
+type OptionalInt struct {
+	present bool
+	value   int
+}
+
+// Present returns the int's presence flag.
+func (oi *OptionalInt) Present() bool {
+	return oi.present
+}
+
+// Value returns the int's value. Should only be used if Present() is true.
+func (oi *OptionalInt) Value() int {
+	return oi.value
+}
+
+// optionalIntValue is a cli.Generic == flag.Value implementation equivalent to
+// the one underlying flag.Int, except that it records whether the flag has been set.
+// This is distinct from OptionalInt to (pretend to) force callers to use
+// NewOptionalIntValue
+type optionalIntValue OptionalInt
+
+// NewOptionalIntValue returns the pflag.Value of the int.
+func NewOptionalIntValue(p *OptionalInt) pflag.Value {
+	p.present = false
+	return (*optionalIntValue)(p)
+}
+
+// Set parses the string to an int and sets it.
+func (ob *optionalIntValue) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, strconv.IntSize)
+	if err != nil {
+		return err
+	}
+	ob.value = int(v)
+	ob.present = true
+	return nil
+}
+
+// String returns the string representation of the int.
+func (ob *optionalIntValue) String() string {
+	if !ob.present {
+		return "" // If the value is not present, just return an empty string, any other value wouldn't make sense.
+	}
+	return strconv.Itoa(int(ob.value))
+}
+
+// Type returns the int's type.
+func (ob *optionalIntValue) Type() string { + return "int" +} diff --git a/vendor/github.com/containers/common/pkg/machine/machine.go b/vendor/github.com/containers/common/pkg/machine/machine.go new file mode 100644 index 00000000000..465eeceaf1b --- /dev/null +++ b/vendor/github.com/containers/common/pkg/machine/machine.go @@ -0,0 +1,70 @@ +package machine + +import ( + "os" + "strings" + "sync" + + "github.com/containers/common/pkg/config" + "github.com/sirupsen/logrus" +) + +type MachineMarker struct { + Enabled bool + Type string +} + +const ( + markerFile = "/etc/containers/podman-machine" + Wsl = "wsl" + Qemu = "qemu" +) + +var ( + markerSync sync.Once + machineMarker *MachineMarker +) + +func loadMachineMarker(file string) { + var kind string + + // Support deprecated config value for compatibility + enabled := isLegacyConfigSet() + + if content, err := os.ReadFile(file); err == nil { + enabled = true + kind = strings.TrimSpace(string(content)) + } + + machineMarker = &MachineMarker{enabled, kind} +} + +func isLegacyConfigSet() bool { + config, err := config.Default() + if err != nil { + logrus.Warnf("could not obtain container configuration") + return false + } + + //nolint:staticcheck //lint:ignore SA1019 deprecated call + return config.Engine.MachineEnabled +} + +func IsPodmanMachine() bool { + return GetMachineMarker().Enabled +} + +func MachineHostType() string { + return GetMachineMarker().Type +} + +func IsGvProxyBased() bool { + return IsPodmanMachine() && MachineHostType() != Wsl +} + +func GetMachineMarker() *MachineMarker { + markerSync.Do(func() { + loadMachineMarker(markerFile) + }) + return machineMarker +} diff --git a/vendor/github.com/containers/common/pkg/manifests/errors.go b/vendor/github.com/containers/common/pkg/manifests/errors.go new file mode 100644 index 00000000000..8398d7efcfe --- /dev/null +++ b/vendor/github.com/containers/common/pkg/manifests/errors.go @@ -0,0 +1,16 @@ +package manifests + +import ( + "errors" +) + +var ( + // ErrDigestNotFound is returned when we look for an image instance + // with a particular digest in a list or index, and fail to find it. + ErrDigestNotFound = errors.New("no image instance matching the specified digest was found in the list or index") + // ErrManifestTypeNotSupported is returned when we attempt to parse a + // manifest with a known MIME type as a list or index, or when we attempt + // to serialize a list or index to a manifest with a MIME type that we + // don't know how to encode. + ErrManifestTypeNotSupported = errors.New("manifest type not supported") +) diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go new file mode 100644 index 00000000000..75ffac06c36 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go @@ -0,0 +1,485 @@ +package manifests + +import ( + "encoding/json" + "os" + + "github.com/containers/image/v5/manifest" + digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// List is a generic interface for manipulating a manifest list or an image +// index. 
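+//
+// A typical flow looks roughly like this (a sketch; d, size, and the media
+// type are illustrative):
+//
+//	l := Create()
+//	_ = l.AddInstance(d, size, v1.MediaTypeImageManifest,
+//		"linux", "amd64", "", nil, "", nil, nil)
+//	raw, _ := l.Serialize(v1.MediaTypeImageIndex)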
+type List interface {
+	AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, os, architecture, osVersion string, osFeatures []string, variant string, features []string, annotations []string) error
+	Remove(instanceDigest digest.Digest) error
+	SetURLs(instanceDigest digest.Digest, urls []string) error
+	URLs(instanceDigest digest.Digest) ([]string, error)
+	SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error
+	Annotations(instanceDigest *digest.Digest) (map[string]string, error)
+	SetOS(instanceDigest digest.Digest, os string) error
+	OS(instanceDigest digest.Digest) (string, error)
+	SetArchitecture(instanceDigest digest.Digest, arch string) error
+	Architecture(instanceDigest digest.Digest) (string, error)
+	SetOSVersion(instanceDigest digest.Digest, osVersion string) error
+	OSVersion(instanceDigest digest.Digest) (string, error)
+	SetVariant(instanceDigest digest.Digest, variant string) error
+	Variant(instanceDigest digest.Digest) (string, error)
+	SetFeatures(instanceDigest digest.Digest, features []string) error
+	Features(instanceDigest digest.Digest) ([]string, error)
+	SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) error
+	OSFeatures(instanceDigest digest.Digest) ([]string, error)
+	Serialize(mimeType string) ([]byte, error)
+	Instances() []digest.Digest
+	OCIv1() *v1.Index
+	Docker() *manifest.Schema2List
+
+	findDocker(instanceDigest digest.Digest) (*manifest.Schema2ManifestDescriptor, error)
+	findOCIv1(instanceDigest digest.Digest) (*v1.Descriptor, error)
+}
+
+type list struct {
+	docker manifest.Schema2List
+	oci    v1.Index
+}
+
+// Docker returns the list as a Docker schema 2 list. The returned structure should NOT be modified.
+func (l *list) Docker() *manifest.Schema2List {
+	return &l.docker
+}
+
+// OCIv1 returns the list as an OCI image index. The returned structure should NOT be modified.
+func (l *list) OCIv1() *v1.Index {
+	return &l.oci
+}
+
+// Create creates a new list.
+func Create() List {
+	return &list{
+		docker: manifest.Schema2List{
+			SchemaVersion: 2,
+			MediaType:     manifest.DockerV2ListMediaType,
+		},
+		oci: v1.Index{
+			Versioned: imgspec.Versioned{SchemaVersion: 2},
+			MediaType: v1.MediaTypeImageIndex,
+		},
+	}
+}
+
+// AddInstance adds an entry for the specified manifest digest, with assorted
+// additional information specified in parameters, to the list or index.
+func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features, annotations []string) error { + if err := l.Remove(manifestDigest); err != nil && !os.IsNotExist(errors.Cause(err)) { + return err + } + + schema2platform := manifest.Schema2PlatformSpec{ + Architecture: architecture, + OS: osName, + OSVersion: osVersion, + OSFeatures: osFeatures, + Variant: variant, + Features: features, + } + l.docker.Manifests = append(l.docker.Manifests, manifest.Schema2ManifestDescriptor{ + Schema2Descriptor: manifest.Schema2Descriptor{ + MediaType: manifestType, + Size: manifestSize, + Digest: manifestDigest, + }, + Platform: schema2platform, + }) + + ociv1platform := v1.Platform{ + Architecture: architecture, + OS: osName, + OSVersion: osVersion, + OSFeatures: osFeatures, + Variant: variant, + } + l.oci.Manifests = append(l.oci.Manifests, v1.Descriptor{ + MediaType: manifestType, + Size: manifestSize, + Digest: manifestDigest, + Platform: &ociv1platform, + }) + + return nil +} + +// Remove filters out any instances in the list which match the specified digest. +func (l *list) Remove(instanceDigest digest.Digest) error { + err := errors.Wrapf(os.ErrNotExist, "no instance matching digest %q found in manifest list", instanceDigest) + newDockerManifests := make([]manifest.Schema2ManifestDescriptor, 0, len(l.docker.Manifests)) + for i := range l.docker.Manifests { + if l.docker.Manifests[i].Digest != instanceDigest { + newDockerManifests = append(newDockerManifests, l.docker.Manifests[i]) + } else { + err = nil + } + } + l.docker.Manifests = newDockerManifests + newOCIv1Manifests := make([]v1.Descriptor, 0, len(l.oci.Manifests)) + for i := range l.oci.Manifests { + if l.oci.Manifests[i].Digest != instanceDigest { + newOCIv1Manifests = append(newOCIv1Manifests, l.oci.Manifests[i]) + } else { + err = nil + } + } + l.oci.Manifests = newOCIv1Manifests + return err +} + +func (l *list) findDocker(instanceDigest digest.Digest) (*manifest.Schema2ManifestDescriptor, error) { + for i := range l.docker.Manifests { + if l.docker.Manifests[i].Digest == instanceDigest { + return &l.docker.Manifests[i], nil + } + } + return nil, errors.Wrapf(ErrDigestNotFound, "no Docker manifest matching digest %q was found in list", instanceDigest.String()) +} + +func (l *list) findOCIv1(instanceDigest digest.Digest) (*v1.Descriptor, error) { + for i := range l.oci.Manifests { + if l.oci.Manifests[i].Digest == instanceDigest { + return &l.oci.Manifests[i], nil + } + } + return nil, errors.Wrapf(ErrDigestNotFound, "no OCI manifest matching digest %q was found in list", instanceDigest.String()) +} + +// SetURLs sets the URLs where the manifest might also be found. +func (l *list) SetURLs(instanceDigest digest.Digest, urls []string) error { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci.URLs = append([]string{}, urls...) + docker.URLs = append([]string{}, urls...) + return nil +} + +// URLs retrieves the locations from which this object might possibly be downloaded. +func (l *list) URLs(instanceDigest digest.Digest) ([]string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return nil, err + } + return append([]string{}, oci.URLs...), nil +} + +// SetAnnotations sets annotations on the image index, or on a specific manifest. 
+// The field is specific to the OCI image index format, and is not present in Docker manifest lists. +func (l *list) SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error { + a := &l.oci.Annotations + if instanceDigest != nil { + oci, err := l.findOCIv1(*instanceDigest) + if err != nil { + return err + } + a = &oci.Annotations + } + (*a) = make(map[string]string) + for k, v := range annotations { + (*a)[k] = v + } + return nil +} + +// Annotations retrieves the annotations which have been set on the image index, or on one instance. +// The field is specific to the OCI image index format, and is not present in Docker manifest lists. +func (l *list) Annotations(instanceDigest *digest.Digest) (map[string]string, error) { + a := l.oci.Annotations + if instanceDigest != nil { + oci, err := l.findOCIv1(*instanceDigest) + if err != nil { + return nil, err + } + a = oci.Annotations + } + annotations := make(map[string]string) + for k, v := range a { + annotations[k] = v + } + return annotations, nil +} + +// SetOS sets the OS field in the platform information associated with the instance with the specified digest. +func (l *list) SetOS(instanceDigest digest.Digest, os string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.OS = os + oci.Platform.OS = os + return nil +} + +// OS retrieves the OS field in the platform information associated with the instance with the specified digest. +func (l *list) OS(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.Platform.OS, nil +} + +// SetArchitecture sets the Architecture field in the platform information associated with the instance with the specified digest. +func (l *list) SetArchitecture(instanceDigest digest.Digest, arch string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.Architecture = arch + oci.Platform.Architecture = arch + return nil +} + +// Architecture retrieves the Architecture field in the platform information associated with the instance with the specified digest. +func (l *list) Architecture(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.Platform.Architecture, nil +} + +// SetOSVersion sets the OSVersion field in the platform information associated with the instance with the specified digest. +func (l *list) SetOSVersion(instanceDigest digest.Digest, osVersion string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.OSVersion = osVersion + oci.Platform.OSVersion = osVersion + return nil +} + +// OSVersion retrieves the OSVersion field in the platform information associated with the instance with the specified digest. +func (l *list) OSVersion(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.Platform.OSVersion, nil +} + +// SetVariant sets the Variant field in the platform information associated with the instance with the specified digest. 
+func (l *list) SetVariant(instanceDigest digest.Digest, variant string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.Variant = variant + oci.Platform.Variant = variant + return nil +} + +// Variant retrieves the Variant field in the platform information associated with the instance with the specified digest. +func (l *list) Variant(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.Platform.Variant, nil +} + +// SetFeatures sets the features list in the platform information associated with the instance with the specified digest. +// The field is specific to the Docker manifest list format, and is not present in OCI's image indexes. +func (l *list) SetFeatures(instanceDigest digest.Digest, features []string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + docker.Platform.Features = append([]string{}, features...) + // no OCI equivalent + return nil +} + +// Features retrieves the features list from the platform information associated with the instance with the specified digest. +// The field is specific to the Docker manifest list format, and is not present in OCI's image indexes. +func (l *list) Features(instanceDigest digest.Digest) ([]string, error) { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return nil, err + } + return append([]string{}, docker.Platform.Features...), nil +} + +// SetOSFeatures sets the OS features list in the platform information associated with the instance with the specified digest. +func (l *list) SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) error { + docker, err := l.findDocker(instanceDigest) + if err != nil { + return err + } + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + docker.Platform.OSFeatures = append([]string{}, osFeatures...) + oci.Platform.OSFeatures = append([]string{}, osFeatures...) + return nil +} + +// OSFeatures retrieves the OS features list from the platform information associated with the instance with the specified digest. +func (l *list) OSFeatures(instanceDigest digest.Digest) ([]string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return nil, err + } + return append([]string{}, oci.Platform.OSFeatures...), nil +} + +// FromBlob builds a list from an encoded manifest list or image index. 
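+//
+// A minimal round-trip sketch (raw is assumed to hold a Docker manifest list
+// or an OCI image index):
+//
+//	l, err := FromBlob(raw)
+//	if err != nil {
+//		return err
+//	}
+//	out, _ := l.Serialize("") // re-encodes, choosing a format automatically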
+func FromBlob(manifestBytes []byte) (List, error) { + manifestType := manifest.GuessMIMEType(manifestBytes) + list := &list{ + docker: manifest.Schema2List{ + SchemaVersion: 2, + MediaType: manifest.DockerV2ListMediaType, + }, + oci: v1.Index{ + Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: v1.MediaTypeImageIndex, + }, + } + switch manifestType { + default: + return nil, errors.Wrapf(ErrManifestTypeNotSupported, "unable to load manifest list: unsupported format %q", manifestType) + case manifest.DockerV2ListMediaType: + if err := json.Unmarshal(manifestBytes, &list.docker); err != nil { + return nil, errors.Wrapf(err, "unable to parse Docker manifest list from image") + } + for _, m := range list.docker.Manifests { + list.oci.Manifests = append(list.oci.Manifests, v1.Descriptor{ + MediaType: m.Schema2Descriptor.MediaType, + Size: m.Schema2Descriptor.Size, + Digest: m.Schema2Descriptor.Digest, + Platform: &v1.Platform{ + Architecture: m.Platform.Architecture, + OS: m.Platform.OS, + OSVersion: m.Platform.OSVersion, + OSFeatures: m.Platform.OSFeatures, + Variant: m.Platform.Variant, + }, + }) + } + case v1.MediaTypeImageIndex: + if err := json.Unmarshal(manifestBytes, &list.oci); err != nil { + return nil, errors.Wrapf(err, "unable to parse OCIv1 manifest list") + } + for _, m := range list.oci.Manifests { + platform := m.Platform + if platform == nil { + platform = &v1.Platform{} + } + list.docker.Manifests = append(list.docker.Manifests, manifest.Schema2ManifestDescriptor{ + Schema2Descriptor: manifest.Schema2Descriptor{ + MediaType: m.MediaType, + Size: m.Size, + Digest: m.Digest, + }, + Platform: manifest.Schema2PlatformSpec{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: platform.Variant, + }, + }) + } + } + return list, nil +} + +func (l *list) preferOCI() bool { + // If we have any data that's only in the OCI format, use that. + for _, m := range l.oci.Manifests { + if len(m.URLs) > 0 { + return true + } + if len(m.Annotations) > 0 { + return true + } + } + // If we have any data that's only in the Docker format, use that. + for _, m := range l.docker.Manifests { + if len(m.Platform.Features) > 0 { + return false + } + } + // If we have no manifests, remember that the Docker format is + // explicitly typed, so use that. Otherwise, default to using the OCI + // format. + return len(l.docker.Manifests) != 0 +} + +// Serialize encodes the list using the specified format, or by selecting one +// which it thinks is appropriate. +func (l *list) Serialize(mimeType string) ([]byte, error) { + var ( + res []byte + err error + ) + switch mimeType { + case "": + if l.preferOCI() { + res, err = json.Marshal(&l.oci) + if err != nil { + return nil, errors.Wrapf(err, "error marshalling OCI image index") + } + } else { + res, err = json.Marshal(&l.docker) + if err != nil { + return nil, errors.Wrapf(err, "error marshalling Docker manifest list") + } + } + case v1.MediaTypeImageIndex: + res, err = json.Marshal(&l.oci) + if err != nil { + return nil, errors.Wrapf(err, "error marshalling OCI image index") + } + case manifest.DockerV2ListMediaType: + res, err = json.Marshal(&l.docker) + if err != nil { + return nil, errors.Wrapf(err, "error marshalling Docker manifest list") + } + default: + return nil, errors.Wrapf(ErrManifestTypeNotSupported, "serializing list to type %q not implemented", mimeType) + } + return res, nil +} + +// Instances returns the list of image instances mentioned in this list. 
+func (l *list) Instances() []digest.Digest {
+	instances := make([]digest.Digest, 0, len(l.oci.Manifests))
+	for _, instance := range l.oci.Manifests {
+		instances = append(instances, instance.Digest)
+	}
+	return instances
+}
diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go
new file mode 100644
index 00000000000..6c4958cc2b6
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/parse/parse.go
@@ -0,0 +1,185 @@
+package parse
+
+// this package contains functions that parse and validate
+// user input and are shared amongst container engine subcommands
+
+import (
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// ValidateVolumeOpts validates a volume's options
+func ValidateVolumeOpts(options []string) ([]string, error) {
+	var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir int
+	finalOpts := make([]string, 0, len(options))
+	for _, opt := range options {
+		// support advanced options like upperdir=/path, workdir=/path
+		if strings.Contains(opt, "upperdir") {
+			foundUpperDir++
+			if foundUpperDir > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 upperdir per overlay", strings.Join(options, ", "))
+			}
+			finalOpts = append(finalOpts, opt)
+			continue
+		}
+		if strings.Contains(opt, "workdir") {
+			foundWorkDir++
+			if foundWorkDir > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 workdir per overlay", strings.Join(options, ", "))
+			}
+			finalOpts = append(finalOpts, opt)
+			continue
+		}
+		if strings.HasPrefix(opt, "idmap") {
+			finalOpts = append(finalOpts, opt)
+			continue
+		}
+
+		switch opt {
+		case "noexec", "exec":
+			foundExec++
+			if foundExec > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 'noexec' or 'exec' option", strings.Join(options, ", "))
+			}
+		case "nodev", "dev":
+			foundDev++
+			if foundDev > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 'nodev' or 'dev' option", strings.Join(options, ", "))
+			}
+		case "nosuid", "suid":
+			foundSuid++
+			if foundSuid > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 'nosuid' or 'suid' option", strings.Join(options, ", "))
+			}
+		case "rw", "ro":
+			foundRWRO++
+			if foundRWRO > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", strings.Join(options, ", "))
+			}
+		case "z", "Z", "O":
+			foundLabelChange++
+			if foundLabelChange > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 'z', 'Z', or 'O' option", strings.Join(options, ", "))
+			}
+		case "U":
+			foundChown++
+			if foundChown > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 'U' option", strings.Join(options, ", "))
+			}
+		case "private", "rprivate", "shared", "rshared", "slave", "rslave", "unbindable", "runbindable":
+			foundRootPropagation++
+			if foundRootPropagation > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 '[r]shared', '[r]private', '[r]slave' or '[r]unbindable' option", strings.Join(options, ", "))
+			}
+		case "bind", "rbind":
+			bindType++
+			if bindType > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 '[r]bind' option", strings.Join(options, ", "))
+			}
+		case "cached", "delegated":
+			// The discarded ops are OS X specific volume options
+			// introduced in a recent Docker version.
+ // They have no meaning on Linux, so here we silently + drop them. This matches Docker's behavior (the options + are intended to be always safe to use, even not on OS + X). + continue + default: + return nil, errors.Errorf("invalid option type %q", opt) + } + finalOpts = append(finalOpts, opt) + } + return finalOpts, nil +} + +// Device parses a device mapping string to a src, dest & permissions string +// Valid values for device look like: +// '/dev/sdc' +// '/dev/sdc:/dev/xvdc' +// '/dev/sdc:/dev/xvdc:rwm' +// '/dev/sdc:rm' +func Device(device string) (src, dest, permissions string, err error) { + permissions = "rwm" + arr := strings.Split(device, ":") + switch len(arr) { + case 3: + if !isValidDeviceMode(arr[2]) { + return "", "", "", errors.Errorf("invalid device mode: %s", arr[2]) + } + permissions = arr[2] + fallthrough + case 2: + if isValidDeviceMode(arr[1]) { + permissions = arr[1] + } else { + if arr[1] == "" || arr[1][0] != '/' { + return "", "", "", errors.Errorf("invalid device mode: %s", arr[1]) + } + dest = arr[1] + } + fallthrough + case 1: + if len(arr[0]) > 0 { + src = arr[0] + break + } + fallthrough + default: + return "", "", "", errors.Errorf("invalid device specification: %s", device) + } + + if dest == "" { + dest = src + } + return src, dest, permissions, nil +} + +// isValidDeviceMode checks if the mode for device is valid or not. +// A valid mode is a composition of r (read), w (write), and m (mknod). +func isValidDeviceMode(mode string) bool { + legalDeviceMode := map[rune]bool{ + 'r': true, + 'w': true, + 'm': true, + } + if mode == "" { + return false + } + for _, c := range mode { + if !legalDeviceMode[c] { + return false + } + legalDeviceMode[c] = false + } + return true +} + +// ValidateVolumeHostDir validates a volume mount's source directory +func ValidateVolumeHostDir(hostDir string) error { + if hostDir == "" { + return errors.New("host directory cannot be empty") + } + if filepath.IsAbs(hostDir) { + if _, err := os.Stat(hostDir); err != nil { + return err + } + } + // If hostDir is not an absolute path, that means the user wants to create a + // named volume. This will be done later on in the code. + return nil +} + +// ValidateVolumeCtrDir validates a volume mount's destination directory.
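A short usage sketch of the two parsers above (not part of the diff; the inputs are illustrative):

src, dst, perm, err := parse.Device("/dev/sdc:/dev/xvdc:rwm")
// src == "/dev/sdc", dst == "/dev/xvdc", perm == "rwm", err == nil

// One option per exclusive group is accepted; a second "ro"/"rw" would be rejected.
opts, err := parse.ValidateVolumeOpts([]string{"ro", "Z", "rbind"})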
+func ValidateVolumeCtrDir(ctrDir string) error { + if ctrDir == "" { + return errors.New("container directory cannot be empty") + } + if !path.IsAbs(ctrDir) { + return errors.Errorf("invalid container path %q, must be an absolute path", ctrDir) + } + return nil +} diff --git a/vendor/github.com/containers/common/pkg/parse/parse_unix.go b/vendor/github.com/containers/common/pkg/parse/parse_unix.go new file mode 100644 index 00000000000..d087c4a02b6 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/parse/parse_unix.go @@ -0,0 +1,51 @@ +//go:build linux || darwin +// +build linux darwin + +package parse + +import ( + "os" + "path/filepath" + + "github.com/containers/storage/pkg/unshare" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/pkg/errors" +) + +func DeviceFromPath(device string) ([]devices.Device, error) { + var devs []devices.Device + src, dst, permissions, err := Device(device) + if err != nil { + return nil, err + } + if unshare.IsRootless() && src != dst { + return nil, errors.Errorf("Renaming device %s to %s is not supported in rootless containers", src, dst) + } + srcInfo, err := os.Stat(src) + if err != nil { + return nil, err + } + + if !srcInfo.IsDir() { + + dev, err := devices.DeviceFromPath(src, permissions) + if err != nil { + return nil, errors.Wrapf(err, "%s is not a valid device", src) + } + dev.Path = dst + devs = append(devs, *dev) + return devs, nil + } + + // If source device is a directory + srcDevices, err := devices.GetDevices(src) + if err != nil { + return nil, errors.Wrapf(err, "error getting source devices from directory %s", src) + } + for _, d := range srcDevices { + d.Path = filepath.Join(dst, filepath.Base(d.Path)) + d.Permissions = devices.Permissions(permissions) + devs = append(devs, *d) + } + return devs, nil +} diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go new file mode 100644 index 00000000000..a9573e4e8a8 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/retry/retry.go @@ -0,0 +1,108 @@ +package retry + +import ( + "context" + "io" + "math" + "net" + "net/url" + "syscall" + "time" + + "github.com/docker/distribution/registry/api/errcode" + errcodev2 "github.com/docker/distribution/registry/api/v2" + "github.com/hashicorp/go-multierror" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// RetryOptions defines the option to retry +type RetryOptions struct { + MaxRetry int // The number of times to possibly retry + Delay time.Duration // The delay to use between retries, if set +} + +// RetryIfNecessary retries the operation in exponential backoff with the retryOptions +func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions *RetryOptions) error { + err := operation() + for attempt := 0; err != nil && isRetryable(err) && attempt < retryOptions.MaxRetry; attempt++ { + delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second + if retryOptions.Delay != 0 { + delay = retryOptions.Delay + } + logrus.Warnf("Failed, retrying in %s ... (%d/%d). 
Error: %v", delay, attempt+1, retryOptions.MaxRetry, err) + select { + case <-time.After(delay): + break + case <-ctx.Done(): + return err + } + err = operation() + } + return err +} + +func isRetryable(err error) bool { + err = errors.Cause(err) + + switch err { + case nil: + return false + case context.Canceled, context.DeadlineExceeded: + return false + default: // continue + } + + type unwrapper interface { + Unwrap() error + } + + switch e := err.(type) { + + case errcode.Error: + switch e.Code { + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeDenied, + errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown: + return false + } + return true + case *net.OpError: + return isRetryable(e.Err) + case *url.Error: // This includes errors returned by the net/http client. + if e.Err == io.EOF { // Happens when a server accepts a HTTP connection and sends EOF + return true + } + return isRetryable(e.Err) + case syscall.Errno: + return isErrnoRetryable(e) + case errcode.Errors: + // if this error is a group of errors, process them all in turn + for i := range e { + if !isRetryable(e[i]) { + return false + } + } + return true + case *multierror.Error: + // if this error is a group of errors, process them all in turn + for i := range e.Errors { + if !isRetryable(e.Errors[i]) { + return false + } + } + return true + case unwrapper: // Test this last, because various error types might implement .Unwrap() + err = e.Unwrap() + return isRetryable(err) + } + + return false +} + +func isErrnoRetryable(e error) bool { + switch e { + case syscall.ECONNREFUSED, syscall.EINTR, syscall.EAGAIN, syscall.EBUSY, syscall.ENETDOWN, syscall.ENETUNREACH, syscall.ENETRESET, syscall.ECONNABORTED, syscall.ECONNRESET, syscall.ETIMEDOUT, syscall.EHOSTDOWN, syscall.EHOSTUNREACH: + return true + } + return isErrnoERESTART(e) +} diff --git a/vendor/github.com/containers/common/pkg/retry/retry_linux.go b/vendor/github.com/containers/common/pkg/retry/retry_linux.go new file mode 100644 index 00000000000..b7942f7f4e0 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/retry/retry_linux.go @@ -0,0 +1,9 @@ +package retry + +import ( + "syscall" +) + +func isErrnoERESTART(e error) bool { + return e == syscall.ERESTART +} diff --git a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go new file mode 100644 index 00000000000..901e28a5dcc --- /dev/null +++ b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package retry + +func isErrnoERESTART(e error) bool { + return false +} diff --git a/vendor/github.com/containers/common/pkg/signal/signal_common.go b/vendor/github.com/containers/common/pkg/signal/signal_common.go new file mode 100644 index 00000000000..7c266290945 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/signal/signal_common.go @@ -0,0 +1,41 @@ +package signal + +import ( + "fmt" + "strconv" + "strings" + "syscall" +) + +// ParseSignal translates a string to a valid syscall signal. +// It returns an error if the signal map doesn't include the given signal. 
+func ParseSignal(rawSignal string) (syscall.Signal, error) { + s, err := strconv.Atoi(rawSignal) + if err == nil { + if s == 0 { + return -1, fmt.Errorf("invalid signal: %s", rawSignal) + } + return syscall.Signal(s), nil + } + sig, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] + if !ok { + return -1, fmt.Errorf("invalid signal: %s", rawSignal) + } + return sig, nil +} + +// ParseSignalNameOrNumber translates a string to a valid syscall signal. Input +// can be a name or number representation i.e. "KILL" "9" +func ParseSignalNameOrNumber(rawSignal string) (syscall.Signal, error) { + basename := strings.TrimPrefix(rawSignal, "-") + s, err := ParseSignal(basename) + if err == nil { + return s, nil + } + for k, v := range signalMap { + if strings.EqualFold(k, basename) { + return v, nil + } + } + return -1, fmt.Errorf("invalid signal: %s", basename) +} diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux.go b/vendor/github.com/containers/common/pkg/signal/signal_linux.go new file mode 100644 index 00000000000..21e09c9fef0 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/signal/signal_linux.go @@ -0,0 +1,108 @@ +//go:build linux && !mips && !mipsle && !mips64 && !mips64le +// +build linux,!mips,!mipsle,!mips64,!mips64le + +// Signal handling for Linux only. +package signal + +// Copyright 2013-2018 Docker, Inc. + +// NOTE: this package has originally been copied from github.com/docker/docker. + +import ( + "os" + "os/signal" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + sigrtmin = 34 + sigrtmax = 64 + + SIGWINCH = syscall.SIGWINCH // For cross-compilation with Windows +) + +// signalMap is a map of Linux signals. +var signalMap = map[string]syscall.Signal{ + "ABRT": unix.SIGABRT, + "ALRM": unix.SIGALRM, + "BUS": unix.SIGBUS, + "CHLD": unix.SIGCHLD, + "CLD": unix.SIGCLD, + "CONT": unix.SIGCONT, + "FPE": unix.SIGFPE, + "HUP": unix.SIGHUP, + "ILL": unix.SIGILL, + "INT": unix.SIGINT, + "IO": unix.SIGIO, + "IOT": unix.SIGIOT, + "KILL": unix.SIGKILL, + "PIPE": unix.SIGPIPE, + "POLL": unix.SIGPOLL, + "PROF": unix.SIGPROF, + "PWR": unix.SIGPWR, + "QUIT": unix.SIGQUIT, + "SEGV": unix.SIGSEGV, + "STKFLT": unix.SIGSTKFLT, + "STOP": unix.SIGSTOP, + "SYS": unix.SIGSYS, + "TERM": unix.SIGTERM, + "TRAP": unix.SIGTRAP, + "TSTP": unix.SIGTSTP, + "TTIN": unix.SIGTTIN, + "TTOU": unix.SIGTTOU, + "URG": unix.SIGURG, + "USR1": unix.SIGUSR1, + "USR2": unix.SIGUSR2, + "VTALRM": unix.SIGVTALRM, + "WINCH": unix.SIGWINCH, + "XCPU": unix.SIGXCPU, + "XFSZ": unix.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} + +// CatchAll catches all signals and relays them to the specified channel. 
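The accepted spellings, as a sketch (not part of the diff):

sig, _ := signal.ParseSignal("SIGTERM")          // the "SIG" prefix is stripped before the map lookup
sig, _ = signal.ParseSignal("15")                // numeric form; "0" is rejected
sig, _ = signal.ParseSignalNameOrNumber("-KILL") // a leading "-" (CLI style) is trimmed first
_ = sig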
+func CatchAll(sigc chan os.Signal) { + handledSigs := make([]os.Signal, 0, len(signalMap)) + for _, s := range signalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go new file mode 100644 index 00000000000..52b07aaf463 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go @@ -0,0 +1,109 @@ +//go:build linux && (mips || mipsle || mips64 || mips64le) +// +build linux +// +build mips mipsle mips64 mips64le + +// Special signal handling for mips architecture +package signal + +// Copyright 2013-2018 Docker, Inc. + +// NOTE: this package has originally been copied from github.com/docker/docker. + +import ( + "os" + "os/signal" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + sigrtmin = 34 + sigrtmax = 127 + + SIGWINCH = syscall.SIGWINCH +) + +// signalMap is a map of Linux signals. +var signalMap = map[string]syscall.Signal{ + "ABRT": unix.SIGABRT, + "ALRM": unix.SIGALRM, + "BUS": unix.SIGBUS, + "CHLD": unix.SIGCHLD, + "CLD": unix.SIGCLD, + "CONT": unix.SIGCONT, + "FPE": unix.SIGFPE, + "HUP": unix.SIGHUP, + "ILL": unix.SIGILL, + "INT": unix.SIGINT, + "IO": unix.SIGIO, + "IOT": unix.SIGIOT, + "KILL": unix.SIGKILL, + "PIPE": unix.SIGPIPE, + "POLL": unix.SIGPOLL, + "PROF": unix.SIGPROF, + "PWR": unix.SIGPWR, + "QUIT": unix.SIGQUIT, + "SEGV": unix.SIGSEGV, + "EMT": unix.SIGEMT, + "STOP": unix.SIGSTOP, + "SYS": unix.SIGSYS, + "TERM": unix.SIGTERM, + "TRAP": unix.SIGTRAP, + "TSTP": unix.SIGTSTP, + "TTIN": unix.SIGTTIN, + "TTOU": unix.SIGTTOU, + "URG": unix.SIGURG, + "USR1": unix.SIGUSR1, + "USR2": unix.SIGUSR2, + "VTALRM": unix.SIGVTALRM, + "WINCH": unix.SIGWINCH, + "XCPU": unix.SIGXCPU, + "XFSZ": unix.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + handledSigs := make([]os.Signal, 0, len(signalMap)) + for _, s := range signalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. 
+func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} diff --git a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go new file mode 100644 index 00000000000..0e8685a7c56 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go @@ -0,0 +1,100 @@ +//go:build !linux +// +build !linux + +// Signal handling for Linux only. +package signal + +import ( + "os" + "syscall" +) + +const ( + sigrtmin = 34 + sigrtmax = 64 + + SIGWINCH = syscall.Signal(0xff) +) + +// signalMap is a map of Linux signals. +// These constants are sourced from the Linux version of golang.org/x/sys/unix +// (I don't see much risk of this changing). +// This should work as long as Podman only runs containers on Linux, which seems +// a safe assumption for now. +var signalMap = map[string]syscall.Signal{ + "ABRT": syscall.Signal(0x6), + "ALRM": syscall.Signal(0xe), + "BUS": syscall.Signal(0x7), + "CHLD": syscall.Signal(0x11), + "CLD": syscall.Signal(0x11), + "CONT": syscall.Signal(0x12), + "FPE": syscall.Signal(0x8), + "HUP": syscall.Signal(0x1), + "ILL": syscall.Signal(0x4), + "INT": syscall.Signal(0x2), + "IO": syscall.Signal(0x1d), + "IOT": syscall.Signal(0x6), + "KILL": syscall.Signal(0x9), + "PIPE": syscall.Signal(0xd), + "POLL": syscall.Signal(0x1d), + "PROF": syscall.Signal(0x1b), + "PWR": syscall.Signal(0x1e), + "QUIT": syscall.Signal(0x3), + "SEGV": syscall.Signal(0xb), + "STKFLT": syscall.Signal(0x10), + "STOP": syscall.Signal(0x13), + "SYS": syscall.Signal(0x1f), + "TERM": syscall.Signal(0xf), + "TRAP": syscall.Signal(0x5), + "TSTP": syscall.Signal(0x14), + "TTIN": syscall.Signal(0x15), + "TTOU": syscall.Signal(0x16), + "URG": syscall.Signal(0x17), + "USR1": syscall.Signal(0xa), + "USR2": syscall.Signal(0xc), + "VTALRM": syscall.Signal(0x1a), + "WINCH": syscall.Signal(0x1c), + "XCPU": syscall.Signal(0x18), + "XFSZ": syscall.Signal(0x19), + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + panic("Unsupported on non-linux platforms") +} + +// StopCatch stops catching the signals and closes the specified channel. 
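Typical use of the CatchAll/StopCatch pair on Linux (a sketch, not part of the diff); the channel should be buffered, since CatchAll subscribes to every signal in the map at once:

sigc := make(chan os.Signal, 64)
signal.CatchAll(sigc) // panics on non-Linux builds, per the stub above
go func() {
	for s := range sigc {
		logrus.Debugf("received %v", s) // hypothetical handler
	}
}()
// ... later:
signal.StopCatch(sigc) // stops delivery and closes sigc, ending the goroutine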
+func StopCatch(sigc chan os.Signal) { + panic("Unsupported on non-linux platforms") +} diff --git a/vendor/github.com/containers/common/pkg/supplemented/errors.go b/vendor/github.com/containers/common/pkg/supplemented/errors.go new file mode 100644 index 00000000000..a031951f152 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/supplemented/errors.go @@ -0,0 +1,17 @@ +package supplemented + +import ( + "errors" + + "github.com/containers/common/pkg/manifests" +) + +var ( + // ErrDigestNotFound is returned when we look for an image instance + // with a particular digest in a list or index, and fail to find it. + ErrDigestNotFound = manifests.ErrDigestNotFound + // ErrBlobNotFound is returned when we try to figure out which supplemental + // image we should ask for a blob with the specified characteristics, + // based on the information in each of the supplemental images' manifests. + ErrBlobNotFound = errors.New("location of blob could not be determined") +) diff --git a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go new file mode 100644 index 00000000000..196176a1c6c --- /dev/null +++ b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go @@ -0,0 +1,402 @@ +package supplemented + +import ( + "container/list" + "context" + "io" + + cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + multierror "github.com/hashicorp/go-multierror" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// supplementedImageReference groups multiple references together. +type supplementedImageReference struct { + types.ImageReference + references []types.ImageReference + multiple cp.ImageListSelection + instances []digest.Digest +} + +// supplementedImageSource represents an image, plus all of the blobs of other images. +type supplementedImageSource struct { + types.ImageSource + reference types.ImageReference + manifest []byte // The manifest list or image index. + manifestType string // The MIME type of the manifest list or image index. + sourceDefaultInstances map[types.ImageSource]digest.Digest // The default manifest instances of open ImageSource objects. + sourceInstancesByInstance map[digest.Digest]types.ImageSource // A map from manifest instance digests to open ImageSource objects. + instancesByBlobDigest map[digest.Digest]digest.Digest // A map from blob digests to manifest instance digests. +} + +// Reference groups one reference and some number of additional references +// together. The first reference's default instance will be treated +// as the default instance of the resulting reference, with the other +// references' instances made available as instances for their respective +// digests. +func Reference(ref types.ImageReference, supplemental []types.ImageReference, multiple cp.ImageListSelection, instances []digest.Digest) types.ImageReference { + if len(instances) > 0 { + i := make([]digest.Digest, len(instances)) + copy(i, instances) + instances = i + } + return &supplementedImageReference{ + ImageReference: ref, + references: append([]types.ImageReference{}, supplemental...), + multiple: multiple, + instances: instances, + } +} + +// NewImage returns a new higher-level view of the image.
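A sketch of composing a supplemented reference with Reference above (not part of the diff; mainRef and extraRef are hypothetical types.ImageReference values, and cp is the containers/image copy package imported above). NewImageSource, defined next, then indexes every blob across the whole group:

ref := supplemented.Reference(mainRef, []types.ImageReference{extraRef}, cp.CopyAllImages, nil)
src, err := ref.NewImageSource(ctx, sys)
if err != nil {
	return err
}
defer src.Close()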
+func (s *supplementedImageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := s.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error building a new Image using an ImageSource") + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource opens the referenced images, scans their manifests for +// instances, and builds mappings from each blob mentioned in them to their +// instances. +func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (iss types.ImageSource, err error) { + sources := make(map[digest.Digest]types.ImageSource) + defaultInstances := make(map[types.ImageSource]digest.Digest) + instances := make(map[digest.Digest]digest.Digest) + var sis *supplementedImageSource + + // Open the default instance for reading. + top, err := s.ImageReference.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error opening %q as image source", transports.ImageName(s.ImageReference)) + } + + defer func() { + if err != nil { + if iss != nil { + // The composite source has been created. Use its Close method. + if err2 := iss.Close(); err2 != nil { + logrus.Errorf("Opening image: %v", err2) + } + } else if top != nil { + // The composite source has not been created, but the top was already opened. Close it. + if err2 := top.Close(); err2 != nil { + logrus.Errorf("Closing image: %v", err2) + } + } + } + }() + + var addSingle, addMulti func(manifestBytes []byte, manifestType string, src types.ImageSource) error + type manifestToRead struct { + src types.ImageSource + instance *digest.Digest + } + manifestsToRead := list.New() + + addSingle = func(manifestBytes []byte, manifestType string, src types.ImageSource) error { + // Mark this instance as being associated with this ImageSource. + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return errors.Wrapf(err, "error computing digest over manifest %q", string(manifestBytes)) + } + sources[manifestDigest] = src + + // Parse the manifest as a single image. + man, err := manifest.FromBlob(manifestBytes, manifestType) + if err != nil { + return errors.Wrapf(err, "error parsing manifest %q", string(manifestBytes)) + } + + // Log the config blob's digest and the blobs of its layers as associated with this manifest. + config := man.ConfigInfo() + if config.Digest != "" { + instances[config.Digest] = manifestDigest + logrus.Debugf("blob %q belongs to %q", config.Digest, manifestDigest) + } + + layers := man.LayerInfos() + for _, layer := range layers { + instances[layer.Digest] = manifestDigest + logrus.Debugf("layer %q belongs to %q", layer.Digest, manifestDigest) + } + + return nil + } + + addMulti = func(manifestBytes []byte, manifestType string, src types.ImageSource) error { + // Mark this instance as being associated with this ImageSource. + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return errors.Wrapf(err, "error computing manifest digest") + } + sources[manifestDigest] = src + + // Parse the manifest as a list of images. + list, err := manifest.ListFromBlob(manifestBytes, manifestType) + if err != nil { + return errors.Wrapf(err, "error parsing manifest blob %q as a %q", string(manifestBytes), manifestType) + } + + // Figure out which of its instances we want to look at. 
+ var chaseInstances []digest.Digest + switch s.multiple { + case cp.CopySystemImage: + instance, err := list.ChooseInstance(sys) + if err != nil { + return errors.Wrapf(err, "error selecting appropriate instance from list") + } + chaseInstances = []digest.Digest{instance} + case cp.CopySpecificImages: + chaseInstances = s.instances + case cp.CopyAllImages: + chaseInstances = list.Instances() + } + + // Queue these manifest instances for reading from this + // ImageSource later, if we don't stumble across them somewhere + // else first. + for _, instanceIterator := range chaseInstances { + instance := instanceIterator + next := &manifestToRead{ + src: src, + instance: &instance, + } + if src == top { + // Prefer any other source. + manifestsToRead.PushBack(next) + } else { + // Prefer this source over the first ("main") one. + manifestsToRead.PushFront(next) + } + } + return nil + } + + visitedReferences := make(map[types.ImageReference]struct{}) + for i, ref := range append([]types.ImageReference{s.ImageReference}, s.references...) { + if _, visited := visitedReferences[ref]; visited { + continue + } + visitedReferences[ref] = struct{}{} + + // Open this image for reading. + var src types.ImageSource + if ref == s.ImageReference { + src = top + } else { + src, err = ref.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error opening %q as image source", transports.ImageName(ref)) + } + } + + // Read the default manifest for the image. + manifestBytes, manifestType, err := src.GetManifest(ctx, nil) + if err != nil { + return nil, errors.Wrapf(err, "error reading default manifest from image %q", transports.ImageName(ref)) + } + + // If this is the first image, mark it as our starting point. + if i == 0 { + sources[""] = src + + sis = &supplementedImageSource{ + ImageSource: top, + reference: s, + manifest: manifestBytes, + manifestType: manifestType, + sourceDefaultInstances: defaultInstances, + sourceInstancesByInstance: sources, + instancesByBlobDigest: instances, + } + iss = sis + } + + // Record the digest of the ImageSource's default instance's manifest. + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return nil, errors.Wrapf(err, "error computing digest of manifest from image %q", transports.ImageName(ref)) + } + sis.sourceDefaultInstances[src] = manifestDigest + + // If the ImageSource's default manifest is a list, parse each of its instances. + if manifest.MIMETypeIsMultiImage(manifestType) { + if err = addMulti(manifestBytes, manifestType, src); err != nil { + return nil, errors.Wrapf(err, "error adding multi-image %q", transports.ImageName(ref)) + } + } else { + if err = addSingle(manifestBytes, manifestType, src); err != nil { + return nil, errors.Wrapf(err, "error adding single image %q", transports.ImageName(ref)) + } + } + } + + // Parse the rest of the instances. + for manifestsToRead.Front() != nil { + front := manifestsToRead.Front() + value := front.Value + manifestToRead, ok := value.(*manifestToRead) + if !ok { + panic("bug: wrong type looking for *manifestToRead in list?") + } + manifestsToRead.Remove(front) + + // If we already read this manifest, no need to read it again. + if _, alreadyRead := sources[*manifestToRead.instance]; alreadyRead { + continue + } + + // Read the instance's manifest. 
+ manifestBytes, manifestType, err := manifestToRead.src.GetManifest(ctx, manifestToRead.instance) + if err != nil { + // if errors.Cause(err) == storage.ErrImageUnknown || os.IsNotExist(errors.Cause(err)) { + // Trust that we either don't need it, or that it's in another reference. + // continue + // } + return nil, errors.Wrapf(err, "error reading manifest for instance %q", manifestToRead.instance) + } + + if manifest.MIMETypeIsMultiImage(manifestType) { + // Add the list's contents. + if err = addMulti(manifestBytes, manifestType, manifestToRead.src); err != nil { + return nil, errors.Wrapf(err, "error adding single image instance %q", manifestToRead.instance) + } + } else { + // Add the single image's contents. + if err = addSingle(manifestBytes, manifestType, manifestToRead.src); err != nil { + return nil, errors.Wrapf(err, "error adding single image instance %q", manifestToRead.instance) + } + } + } + + return iss, nil +} + +func (s *supplementedImageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.Errorf("deletion of images not implemented") +} + +func (s *supplementedImageSource) Close() error { + var returnErr *multierror.Error + closed := make(map[types.ImageSource]struct{}) + for _, sourceInstance := range s.sourceInstancesByInstance { + if _, closed := closed[sourceInstance]; closed { + continue + } + if err := sourceInstance.Close(); err != nil { + returnErr = multierror.Append(returnErr, err) + } + closed[sourceInstance] = struct{}{} + } + if returnErr == nil { + return nil + } + return returnErr.ErrorOrNil() +} + +func (s *supplementedImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + requestInstanceDigest := instanceDigest + if instanceDigest == nil { + return s.manifest, s.manifestType, nil + } + if sourceInstance, ok := s.sourceInstancesByInstance[*instanceDigest]; ok { + if *instanceDigest == s.sourceDefaultInstances[sourceInstance] { + requestInstanceDigest = nil + } + return sourceInstance.GetManifest(ctx, requestInstanceDigest) + } + return nil, "", errors.Wrapf(ErrDigestNotFound, "error getting manifest for digest %q", *instanceDigest) +} + +func (s *supplementedImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, bic types.BlobInfoCache) (io.ReadCloser, int64, error) { + sourceInstance, ok := s.instancesByBlobDigest[blob.Digest] + if !ok { + return nil, -1, errors.Wrapf(ErrBlobNotFound, "error blob %q in known instances", blob.Digest) + } + src, ok := s.sourceInstancesByInstance[sourceInstance] + if !ok { + return nil, -1, errors.Wrapf(ErrDigestNotFound, "error getting image source for instance %q", sourceInstance) + } + return src.GetBlob(ctx, blob, bic) +} + +func (s *supplementedImageSource) HasThreadSafeGetBlob() bool { + checked := make(map[types.ImageSource]struct{}) + for _, sourceInstance := range s.sourceInstancesByInstance { + if _, checked := checked[sourceInstance]; checked { + continue + } + if !sourceInstance.HasThreadSafeGetBlob() { + return false + } + checked[sourceInstance] = struct{}{} + } + return true +} + +func (s *supplementedImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + var ( + src types.ImageSource + digest digest.Digest + ) + requestInstanceDigest := instanceDigest + if instanceDigest == nil { + if sourceInstance, ok := s.sourceInstancesByInstance[""]; ok { + src = sourceInstance + } + } else { + digest = *instanceDigest + if sourceInstance, ok := 
s.sourceInstancesByInstance[*instanceDigest]; ok { + src = sourceInstance + } + if *instanceDigest == s.sourceDefaultInstances[src] { + requestInstanceDigest = nil + } + } + if src != nil { + return src.GetSignatures(ctx, requestInstanceDigest) + } + return nil, errors.Wrapf(ErrDigestNotFound, "error finding instance for instance digest %q to read signatures", digest) +} + +func (s *supplementedImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + var src types.ImageSource + requestInstanceDigest := instanceDigest + errMsgDigest := "" + if instanceDigest == nil { + if sourceInstance, ok := s.sourceInstancesByInstance[""]; ok { + src = sourceInstance + } + } else { + errMsgDigest = string(*instanceDigest) + if sourceInstance, ok := s.sourceInstancesByInstance[*instanceDigest]; ok { + src = sourceInstance + } + if *instanceDigest == s.sourceDefaultInstances[src] { + requestInstanceDigest = nil + } + } + if src != nil { + blobInfos, err := src.LayerInfosForCopy(ctx, requestInstanceDigest) + if err != nil { + return nil, errors.Wrapf(err, "error reading layer infos for copy from instance %q", instanceDigest) + } + var manifestDigest digest.Digest + if instanceDigest != nil { + manifestDigest = *instanceDigest + } + for _, blobInfo := range blobInfos { + s.instancesByBlobDigest[blobInfo.Digest] = manifestDigest + } + return blobInfos, nil + } + return nil, errors.Wrapf(ErrDigestNotFound, "error finding instance for instance digest %q to copy layers", errMsgDigest) +} diff --git a/vendor/github.com/containers/common/pkg/timetype/timestamp.go b/vendor/github.com/containers/common/pkg/timetype/timestamp.go new file mode 100644 index 00000000000..3cbfe40980b --- /dev/null +++ b/vendor/github.com/containers/common/pkg/timetype/timestamp.go @@ -0,0 +1,132 @@ +package timetype + +// code adapted from https://github.com/moby/moby/blob/master/api/types/time/timestamp.go + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. 
+func GetTimestamp(value string, reference time.Time) (string, error) { + if d, err := time.ParseDuration(value); value != "0" && err == nil { + return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil + } + + var format string + // if the string has a Z, a +, or three dashes, use time.Parse; otherwise use time.ParseInLocation + parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + + switch { + case strings.Contains(value, "."): + if parseInLocation { + format = rFC3339NanoLocal + } else { + format = time.RFC3339Nano + } + case strings.Contains(value, "T"): + // we want the number of colons in the T portion of the timestamp + tcolons := strings.Count(value, ":") + // if parseInLocation is off and we have a +/- zone offset (not Z) then + // there will be an extra colon in the input for the tz offset; subtract that + // colon from the tcolons count + if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { + tcolons-- + } + if parseInLocation { + switch tcolons { + case 0: + format = "2006-01-02T15" + case 1: + format = "2006-01-02T15:04" + default: + format = rFC3339Local + } + } else { + switch tcolons { + case 0: + format = "2006-01-02T15Z07:00" + case 1: + format = "2006-01-02T15:04Z07:00" + default: + format = time.RFC3339 + } + } + case parseInLocation: + format = dateLocal + default: + format = dateWithZone + } + + var t time.Time + var err error + + if parseInLocation { + t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) + } else { + t, err = time.Parse(format, value) + } + + if err != nil { + // if there is a `-` then it's an RFC3339 like timestamp + if strings.Contains(value, "-") { + return "", err // was probably an RFC3339 like timestamp but the parser failed with an error + } + if _, _, err := parseTimestamp(value); err != nil { + return "", fmt.Errorf("failed to parse value as time or duration: %q", value) + } + return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + } + + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil +} + +// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the +// format produced by fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())). +// If the incoming nanosecond portion is longer or shorter than 9 digits it is +// converted to nanoseconds. The expectation is that the seconds and +// nanoseconds will be used to create a time variable.
For example: +// seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0) +// if err == nil { since := time.Unix(seconds, nanoseconds) } +// returns def (the default seconds) if value == "" +func ParseTimestamps(value string, def int64) (secs, nanoSecs int64, err error) { + if value == "" { + return def, 0, nil + } + return parseTimestamp(value) +} + +func parseTimestamp(value string) (int64, int64, error) { + sa := strings.SplitN(value, ".", 2) + s, err := strconv.ParseInt(sa[0], 10, 64) + if err != nil { + return s, 0, err + } + if len(sa) != 2 { + return s, 0, nil + } + n, err := strconv.ParseInt(sa[1], 10, 64) + if err != nil { + return s, n, err + } + // should already be in nanoseconds but just in case convert n to nanoseconds + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + return s, n, nil +} diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go new file mode 100644 index 00000000000..98890a686f8 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -0,0 +1,24 @@ +package util + +import "regexp" + +// StringInSlice determines if a string is in a string slice, returns bool +func StringInSlice(s string, sl []string) bool { + for _, i := range sl { + if i == s { + return true + } + } + return false +} + +// StringMatchRegexSlice determines if a given string matches one of the given regexes, returns bool +func StringMatchRegexSlice(s string, re []string) bool { + for _, r := range re { + m, err := regexp.MatchString(r, s) + if err == nil && m { + return true + } + } + return false +} diff --git a/vendor/github.com/containers/common/pkg/util/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go new file mode 100644 index 00000000000..35201f93237 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/util/util_supported.go @@ -0,0 +1,87 @@ +//go:build linux || darwin || freebsd +// +build linux darwin freebsd + +package util + +import ( + "fmt" + "os" + "path/filepath" + "sync" + "syscall" + + "github.com/containers/storage/pkg/unshare" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + rootlessRuntimeDirOnce sync.Once + rootlessRuntimeDir string +) + +// isWriteableOnlyByOwner checks that the specified permission mask allows write +// access only to the owner.
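Returning to the timetype helpers above, a few illustrative calls (not part of the diff; exact output depends on the reference time and the local zone):

now := time.Now()
ts, _ := timetype.GetTimestamp("2h30m", now)               // Unix seconds of now minus 2h30m
ts, _ = timetype.GetTimestamp("2006-01-02T15:04:05Z", now) // "<seconds>.<9-digit nanoseconds>"
secs, nanos, _ := timetype.ParseTimestamps(ts, 0)          // back to integers
_ = time.Unix(secs, nanos)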
+func isWriteableOnlyByOwner(perm os.FileMode) bool { + return (perm & 0o722) == 0o700 +} + +// GetRuntimeDir returns the runtime directory +func GetRuntimeDir() (string, error) { + var rootlessRuntimeDirError error + + rootlessRuntimeDirOnce.Do(func() { + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + if runtimeDir != "" { + st, err := os.Stat(runtimeDir) + if err != nil { + rootlessRuntimeDirError = err + return + } + if int(st.Sys().(*syscall.Stat_t).Uid) != os.Geteuid() { + rootlessRuntimeDirError = fmt.Errorf("XDG_RUNTIME_DIR directory %q is not owned by the current user", runtimeDir) + return + } + } + uid := fmt.Sprintf("%d", unshare.GetRootlessUID()) + if runtimeDir == "" { + tmpDir := filepath.Join("/run", "user", uid) + if err := os.MkdirAll(tmpDir, 0o700); err != nil { + logrus.Debugf("unable to make temp dir: %v", err) + } + st, err := os.Stat(tmpDir) + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { + runtimeDir = tmpDir + } + } + if runtimeDir == "" { + tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%s", uid)) + if err := os.MkdirAll(tmpDir, 0o700); err != nil { + logrus.Debugf("unable to make temp dir %v", err) + } + st, err := os.Stat(tmpDir) + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { + runtimeDir = tmpDir + } + } + if runtimeDir == "" { + home := os.Getenv("HOME") + if home == "" { + rootlessRuntimeDirError = errors.New("neither XDG_RUNTIME_DIR nor HOME was set non-empty") + return + } + resolvedHome, err := filepath.EvalSymlinks(home) + if err != nil { + rootlessRuntimeDirError = errors.Wrap(err, "cannot resolve home") + return + } + runtimeDir = filepath.Join(resolvedHome, "rundir") + } + rootlessRuntimeDir = runtimeDir + }) + + if rootlessRuntimeDirError != nil { + return "", rootlessRuntimeDirError + } + return rootlessRuntimeDir, nil +} diff --git a/vendor/github.com/containers/common/pkg/util/util_windows.go b/vendor/github.com/containers/common/pkg/util/util_windows.go new file mode 100644 index 00000000000..1cffb21fc31 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/util/util_windows.go @@ -0,0 +1,13 @@ +//go:build windows +// +build windows + +package util + +import ( + "github.com/pkg/errors" +) + +// getRuntimeDir returns the runtime directory +func GetRuntimeDir() (string, error) { + return "", errors.New("this function is not implemented for windows") +} diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go new file mode 100644 index 00000000000..61fce9d2298 --- /dev/null +++ b/vendor/github.com/containers/common/version/version.go @@ -0,0 +1,4 @@ +package version + +// Version is the version of the build. 
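GetRuntimeDir above falls back in order: $XDG_RUNTIME_DIR, then /run/user/$UID, then os.TempDir()/podman-run-$UID, then $HOME/rundir. Usage is just (a sketch, not part of the diff):

dir, err := util.GetRuntimeDir()
if err != nil {
	return err
}
logrus.Debugf("runtime dir: %s", dir) // e.g. /run/user/1000 for a rootless user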
+const Version = "0.48.0" diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index b616e566cb1..d28cc4a3ffd 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "reflect" "strings" @@ -33,7 +32,6 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vbauerster/mpb/v7" - "github.com/vbauerster/mpb/v7/decor" "golang.org/x/sync/semaphore" "golang.org/x/term" ) @@ -199,7 +197,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, return nil, err } - reportWriter := ioutil.Discard + reportWriter := io.Discard if options.ReportWriter != nil { reportWriter = options.ReportWriter @@ -232,7 +230,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, // createProgressBar() will print a single line instead. progressOutput := reportWriter if !isTTY(reportWriter) { - progressOutput = ioutil.Discard + progressOutput = io.Discard } c := &copier{ @@ -713,8 +711,6 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) - // If encrypted and decryption keys provided, we should try to decrypt - ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil // If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal if options.OptimizeDestinationImageAlreadyExists { @@ -1070,85 +1066,6 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc return man, manifestDigest, nil } -// newProgressPool creates a *mpb.Progress. -// The caller must eventually call pool.Wait() after the pool will no longer be updated. -// NOTE: Every progress bar created within the progress pool must either successfully -// complete or be aborted, or pool.Wait() will hang. That is typically done -// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. -func (c *copier) newProgressPool() *mpb.Progress { - return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput)) -} - -// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar -func customPartialBlobDecorFunc(s decor.Statistics) string { - if s.Total == 0 { - pairFmt := "%.1f / %.1f (skipped: %.1f)" - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) - } - pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" - percentage := 100.0 * float64(s.Refill) / float64(s.Total) - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) -} - -// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter -// is ioutil.Discard, the progress bar's output will be discarded -// NOTE: Every progress bar created within a progress pool must either successfully -// complete or be aborted, or pool.Wait() will hang. That is typically done -// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called. 
-func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar { - // shortDigestLen is the length of the digest used for blobs. - const shortDigestLen = 12 - - prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded()) - // Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column. - maxPrefixLen := len("Copying blob ") + shortDigestLen - if len(prefix) > maxPrefixLen { - prefix = prefix[:maxPrefixLen] - } - - // onComplete will replace prefix once the bar/spinner has completed - onComplete = prefix + " " + onComplete - - // Use a normal progress bar when we know the size (i.e., size > 0). - // Otherwise, use a spinner to indicate that something's happening. - var bar *mpb.Bar - if info.Size > 0 { - if partial { - bar = pool.AddBar(info.Size, - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - mpb.AppendDecorators( - decor.Any(customPartialBlobDecorFunc), - ), - ) - } else { - bar = pool.AddBar(info.Size, - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - mpb.AppendDecorators( - decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""), - ), - ) - } - } else { - bar = pool.New(0, - mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(), - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - ) - } - if c.progressOutput == ioutil.Discard { - c.Printf("Copying %s %s\n", kind, info.Digest) - } - return bar -} - // copyConfig copies config.json, if any, from src to dest. func (c *copier) copyConfig(ctx context.Context, src types.Image) error { srcInfo := src.ConfigInfo() @@ -1159,22 +1076,23 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error { } defer c.concurrentBlobCopiesSemaphore.Release(1) - configBlob, err := src.ConfigBlob(ctx) - if err != nil { - return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest) - } - destInfo, err := func() (types.BlobInfo, error) { // A scope for defer progressPool := c.newProgressPool() defer progressPool.Wait() bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done") defer bar.Abort(false) + configBlob, err := src.ConfigBlob(ctx) + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "reading config blob %s", srcInfo.Digest) + } + destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false) if err != nil { return types.BlobInfo{}, err } - bar.SetTotal(int64(len(configBlob)), true) + + bar.mark100PercentComplete() return destInfo, nil }() if err != nil { @@ -1219,11 +1137,15 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" - // Diffs are needed if we are encrypting an image or trying to decrypt an image - diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil) - - // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source. - if !diffIDIsNeeded { + diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" + // When encrypting or decrypting, only use the simple code path. We might be able to optimize more + // (e.g.
if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again), + // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not. + encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil) + canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting + + // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable. + if canAvoidProcessingCompleteLayer { // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause @@ -1243,9 +1165,9 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to if reused { logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) func() { // A scope for defer - bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists") + bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists") defer bar.Abort(false) - bar.SetTotal(0, true) + bar.mark100PercentComplete() }() // Throw an event that the layer has been skipped @@ -1276,7 +1198,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // of the source file are not known yet and must be fetched. // Attempt a partial only when the source allows to retrieve a blob partially and // the destination has support for it. - if ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() && !diffIDIsNeeded { + if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() { if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done") hideProgressBar := true @@ -1288,12 +1210,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to wrapped: ic.c.rawSource, bar: bar, } - bar.SetTotal(srcInfo.Size, false) info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) if err == nil { - bar.SetRefill(srcInfo.Size - bar.Current()) - bar.SetCurrent(srcInfo.Size) - bar.SetTotal(srcInfo.Size, true) + if srcInfo.Size != -1 { + bar.SetRefill(srcInfo.Size - bar.Current()) + } + bar.mark100PercentComplete() hideProgressBar = false logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) return true, info @@ -1306,16 +1228,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } // Fallback: copy the layer, computing the diffID if we need to do so - srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest) - } - defer srcStream.Close() - return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") defer bar.Abort(false) + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) + if err != nil { + return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest) + } + defer srcStream.Close() + blobInfo, diffIDChan, 
err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer) if err != nil { return types.BlobInfo{}, "", err @@ -1331,14 +1253,22 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID") } logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process - // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. - ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encryptingOrDecrypting { + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. + ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + } diffID = diffIDResult.digest } } - bar.SetTotal(srcInfo.Size, true) + bar.mark100PercentComplete() return blobInfo, diffID, nil }() } @@ -1348,7 +1278,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // perhaps (de/re/)compressing the stream, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) { + diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) { var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil var diffIDChan chan diffIDResult @@ -1425,7 +1355,7 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) { // and returns a complete blobInfo of the copied blob. func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer, - canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { + canModifyBlob bool, isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { if isConfig { // This is guaranteed by the caller, but set it here to be explicit. canModifyBlob = false } @@ -1445,6 +1375,9 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } var destStream io.Reader = digestingReader + // === Update progress bars + destStream = bar.ProxyReader(destStream) + // === Decrypt the stream, if required. 
var decrypted bool if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil { @@ -1479,9 +1412,6 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name()) } - // === Update progress bars - destStream = bar.ProxyReader(destStream) - // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. if getOriginalLayerCopyWriter != nil { @@ -1669,7 +1599,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr // sent there if we are not already at EOF. if getOriginalLayerCopyWriter != nil { logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") - _, err := io.Copy(ioutil.Discard, originalLayerReader) + _, err := io.Copy(io.Discard, originalLayerReader) if err != nil { return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest) } @@ -1682,19 +1612,27 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest) } if digestingReader.validationSucceeded { - // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: - // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader - // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob - // (because inputInfo.Digest == "", this must have been computed afresh). - switch compressionOperation { - case types.PreserveOriginal: - break // Do nothing, we have only one digest and we might not have even verified it. - case types.Compress: - c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) - case types.Decompress: - c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) - default: - return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encrypted && !decrypted { + // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: + // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader + // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob + // (because inputInfo.Digest == "", this must have been computed afresh). + switch compressionOperation { + case types.PreserveOriginal: + break // Do nothing, we have only one digest and we might not have even verified it. 
+			case types.Compress:
+				c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+			case types.Decompress:
+				c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+			default:
+				return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
+			}
 		}
 		if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression {
 			c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName)
diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go
new file mode 100644
index 00000000000..585d860570d
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go
@@ -0,0 +1,148 @@
+package copy
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/types"
+	"github.com/vbauerster/mpb/v7"
+	"github.com/vbauerster/mpb/v7/decor"
+)
+
+// newProgressPool creates a *mpb.Progress.
+// The caller must eventually call pool.Wait() after the pool will no longer be updated.
+// NOTE: Every progress bar created within the progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called.
+func (c *copier) newProgressPool() *mpb.Progress {
+	return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
+}
+
+// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
+func customPartialBlobDecorFunc(s decor.Statistics) string {
+	if s.Total == 0 {
+		pairFmt := "%.1f / %.1f (skipped: %.1f)"
+		return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
+	}
+	pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
+	percentage := 100.0 * float64(s.Refill) / float64(s.Total)
+	return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
+}
+
+// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
+type progressBar struct {
+	*mpb.Bar
+	originalSize int64 // or -1 if unknown
+}
+
+// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter
+// is io.Discard, the progress bar's output will be discarded.
+//
+// NOTE: Every progress bar created within a progress pool must either successfully
+// complete or be aborted, or pool.Wait() will hang. That is typically done
+// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
+//
+// As a convention, most users of progress bars should call mark100PercentComplete on full success;
+// we don't leave progress bars in a partial state when fully done
+// (even if we copied much less data than anticipated).
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar {
+	// shortDigestLen is the length of the digest used for blobs.
+	const shortDigestLen = 12
+
+	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+	// Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
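+	// e.g. "Copying blob " is 13 characters, so with the 12-character short digest
+	// the prefix is at most 25 columns wide.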
+	maxPrefixLen := len("Copying blob ") + shortDigestLen
+	if len(prefix) > maxPrefixLen {
+		prefix = prefix[:maxPrefixLen]
+	}
+
+	// onComplete will replace prefix once the bar/spinner has completed
+	onComplete = prefix + " " + onComplete
+
+	// Use a normal progress bar when we know the size (i.e., size > 0).
+	// Otherwise, use a spinner to indicate that something's happening.
+	var bar *mpb.Bar
+	if info.Size > 0 {
+		if partial {
+			bar = pool.AddBar(info.Size,
+				mpb.BarFillerClearOnComplete(),
+				mpb.PrependDecorators(
+					decor.OnComplete(decor.Name(prefix), onComplete),
+				),
+				mpb.AppendDecorators(
+					decor.Any(customPartialBlobDecorFunc),
+				),
+			)
+		} else {
+			bar = pool.AddBar(info.Size,
+				mpb.BarFillerClearOnComplete(),
+				mpb.PrependDecorators(
+					decor.OnComplete(decor.Name(prefix), onComplete),
+				),
+				mpb.AppendDecorators(
+					decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
+				),
+			)
+		}
+	} else {
+		bar = pool.New(0,
+			mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
+			mpb.BarFillerClearOnComplete(),
+			mpb.PrependDecorators(
+				decor.OnComplete(decor.Name(prefix), onComplete),
+			),
+		)
+	}
+	if c.progressOutput == io.Discard {
+		c.Printf("Copying %s %s\n", kind, info.Digest)
+	}
+	return &progressBar{
+		Bar:          bar,
+		originalSize: info.Size,
+	}
+}
+
+// mark100PercentComplete marks the progress bar as 100% complete;
+// it may do so by advancing the current state if it is below the known total.
+func (bar *progressBar) mark100PercentComplete() {
+	if bar.originalSize > 0 {
+		// We can't call bar.SetTotal even if we wanted to; the total cannot be changed
+		// after a progress bar is created with a definite total.
+		bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
+	} else {
+		// -1 = unknown size
+		// 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
+		// size (possible at least in theory), in mpb, zero-sized progress bars are treated
+		// as unknown size, in particular they are not configured to be marked as
+		// complete on bar.Current() reaching bar.total (because that would happen already
+		// when creating the progress bar).
+		// That means that we are both _allowed_ to call SetTotal, and we _have to_.
+		bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete.
+	}
+}
+
+// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar
+// with the number of received bytes.
+type blobChunkAccessorProxy struct {
+	wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
+	bar     *progressBar              // A progress bar updated with the number of bytes read so far
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must be not overlapping and sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
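+// Note that the bar is advanced by the combined length of the requested chunks as
+// soon as the underlying GetBlobAt call succeeds, not as the returned readers are
+// actually consumed.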
+func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) + if err == nil { + total := int64(0) + for _, c := range chunks { + total += int64(c.Length) + } + s.bar.IncrInt64(total) + } + return rc, errs, err +} diff --git a/vendor/github.com/containers/image/v5/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_channel.go similarity index 63% rename from vendor/github.com/containers/image/v5/copy/progress_reader.go rename to vendor/github.com/containers/image/v5/copy/progress_channel.go index de23cec1b71..d5e9e09bda9 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_reader.go +++ b/vendor/github.com/containers/image/v5/copy/progress_channel.go @@ -1,16 +1,13 @@ package copy import ( - "context" "io" "time" - "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/types" - "github.com/vbauerster/mpb/v7" ) -// progressReader is a reader that reports its progress on an interval. +// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval. type progressReader struct { source io.Reader channel chan<- types.ProgressProperties @@ -80,27 +77,3 @@ func (r *progressReader) Read(p []byte) (int, error) { } return n, err } - -// blobChunkAccessorProxy wraps a BlobChunkAccessor and keeps track of how many bytes -// are received. -type blobChunkAccessorProxy struct { - wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor - bar *mpb.Bar // A progress bar updated with the number of bytes read so far -} - -// GetBlobAt returns a sequential channel of readers that contain data for the requested -// blob chunks, and a channel that might get a single error value. -// The specified chunks must be not overlapping and sorted by their offset. -// The readers must be fully consumed, in the order they are returned, before blocking -// to read the next chunk. -func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) - if err == nil { - total := int64(0) - for _, c := range chunks { - total += int64(c.Length) - } - s.bar.IncrInt64(total) - } - return rc, errs, err -} diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go new file mode 100644 index 00000000000..3b135e68e50 --- /dev/null +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -0,0 +1,294 @@ +package directory + +import ( + "context" + "io" + "os" + "path/filepath" + "runtime" + + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const version = "Directory Transport Version: 1.1\n" + +// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created +// using the 'dir' transport +var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") + +type dirImageDestination struct { + ref dirReference + desiredLayerCompression types.LayerCompression +} + +// newImageDestination returns an ImageDestination for writing to a directory. 
+func newImageDestination(sys *types.SystemContext, ref dirReference) (types.ImageDestination, error) {
+	desiredLayerCompression := types.PreserveOriginal
+	if sys != nil {
+		if sys.DirForceCompress {
+			desiredLayerCompression = types.Compress
+
+			if sys.DirForceDecompress {
+				return nil, errors.Errorf("Cannot compress and decompress at the same time")
+			}
+		}
+		if sys.DirForceDecompress {
+			desiredLayerCompression = types.Decompress
+		}
+	}
+	d := &dirImageDestination{ref: ref, desiredLayerCompression: desiredLayerCompression}
+
+	// If the directory exists, check whether it is empty;
+	// if it is not empty, check whether its contents match those of a container image directory and overwrite them;
+	// if the contents don't match, return an error.
+	dirExists, err := pathExists(d.ref.resolvedPath)
+	if err != nil {
+		return nil, errors.Wrapf(err, "checking for path %q", d.ref.resolvedPath)
+	}
+	if dirExists {
+		isEmpty, err := isDirEmpty(d.ref.resolvedPath)
+		if err != nil {
+			return nil, err
+		}
+
+		if !isEmpty {
+			versionExists, err := pathExists(d.ref.versionPath())
+			if err != nil {
+				return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath())
+			}
+			if versionExists {
+				contents, err := os.ReadFile(d.ref.versionPath())
+				if err != nil {
+					return nil, err
+				}
+				// check whether the contents of the version file are what we expect them to be
+				if string(contents) != version {
+					return nil, ErrNotContainerImageDir
+				}
+			} else {
+				return nil, ErrNotContainerImageDir
+			}
+			// delete directory contents so that only one image is in the directory at a time
+			if err = removeDirContents(d.ref.resolvedPath); err != nil {
+				return nil, errors.Wrapf(err, "erasing contents in %q", d.ref.resolvedPath)
+			}
+			logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath)
+		}
+	} else {
+		// create directory if it doesn't exist
+		if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil {
+			return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath)
+		}
+	}
+	// create version file
+	err = os.WriteFile(d.ref.versionPath(), []byte(version), 0644)
+	if err != nil {
+		return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath())
+	}
+	return d, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *dirImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *dirImageDestination) Close() error {
+	return nil
+}
+
+func (d *dirImageDestination) SupportedManifestMIMETypes() []string {
+	return nil
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error {
+	return nil
+}
+
+func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
+	return d.desiredLayerCompression
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *dirImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *dirImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *dirImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob") + if err != nil { + return types.BlobInfo{}, err + } + succeeded := false + explicitClosed := false + defer func() { + if !explicitClosed { + blobFile.Close() + } + if !succeeded { + os.Remove(blobFile.Name()) + } + }() + + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + size, err := io.Copy(blobFile, stream) + if err != nil { + return types.BlobInfo{}, err + } + blobDigest := digester.Digest() + if inputInfo.Size != -1 && size != inputInfo.Size { + return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + } + if err := blobFile.Sync(); err != nil { + return types.BlobInfo{}, err + } + + // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. + // On Windows, the “permissions of newly created files” argument to syscall.Open is + // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod, + // always fails on Windows. 
+	if runtime.GOOS != "windows" {
+		if err := blobFile.Chmod(0644); err != nil {
+			return types.BlobInfo{}, err
+		}
+	}
+
+	blobPath := d.ref.layerPath(blobDigest)
+	// need to explicitly close the file, since a rename won't otherwise work on Windows
+	blobFile.Close()
+	explicitClosed = true
+	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded = true
+	return types.BlobInfo{Digest: blobDigest, Size: size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
+	}
+	blobPath := d.ref.layerPath(info.Digest)
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, types.BlobInfo{}, nil
+	}
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
+	return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
+}
+
+// PutSignatures writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
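+// Signature files are written as signature-1, signature-2, … (see dirReference.signaturePath
+// for the naming convention).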
+func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + for i, sig := range signatures { + if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil { + return err + } + } + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error { + return nil +} + +// returns true if path exists +func pathExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// returns true if directory is empty +func isDirEmpty(path string) (bool, error) { + files, err := os.ReadDir(path) + if err != nil { + return false, err + } + return len(files) == 0, nil +} + +// deletes the contents of a directory +func removeDirContents(path string) error { + files, err := os.ReadDir(path) + if err != nil { + return err + } + + for _, file := range files { + if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go new file mode 100644 index 00000000000..8b509112aa8 --- /dev/null +++ b/vendor/github.com/containers/image/v5/directory/directory_src.go @@ -0,0 +1,95 @@ +package directory + +import ( + "context" + "io" + "os" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +type dirImageSource struct { + ref dirReference +} + +// newImageSource returns an ImageSource reading from an existing directory. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(ref dirReference) types.ImageSource { + return &dirImageSource{ref} +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *dirImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *dirImageSource) Close() error { + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
+func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + m, err := os.ReadFile(s.ref.manifestPath(instanceDigest)) + if err != nil { + return nil, "", err + } + return m, manifest.GuessMIMEType(m), err +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *dirImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + r, err := os.Open(s.ref.layerPath(info.Digest)) + if err != nil { + return nil, -1, err + } + fi, err := r.Stat() + if err != nil { + return nil, -1, err + } + return r, fi.Size(), nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + signatures := [][]byte{} + for i := 0; ; i++ { + signature, err := os.ReadFile(s.ref.signaturePath(i, instanceDigest)) + if err != nil { + if os.IsNotExist(err) { + break + } + return nil, err + } + signatures = append(signatures, signature) + } + return signatures, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *dirImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go new file mode 100644 index 00000000000..e542d888c22 --- /dev/null +++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go @@ -0,0 +1,189 @@ +package directory + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for directory paths. 
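+// References for it take the form dir:path, e.g. "dir:/tmp/myimage".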
+var Transport = dirTransport{} + +type dirTransport struct{} + +func (t dirTransport) Name() string { + return "dir" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) { + return NewReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { + if !strings.HasPrefix(scope, "/") { + return errors.Errorf("Invalid scope %s: Must be an absolute path", scope) + } + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + cleaned := filepath.Clean(scope) + if cleaned != scope { + return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + } + return nil +} + +// dirReference is an ImageReference for directory paths. +type dirReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + path string // As specified by the user. May be relative, contain symlinks, etc. + resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. +} + +// There is no directory.ParseReference because it is rather pointless. +// Callers who need a transport-independent interface will go through +// dirTransport.ParseReference; callers who intentionally deal with directories +// can use directory.NewReference. + +// NewReference returns a directory reference for a specified path. +// +// We do not expose an API supplying the resolvedPath; we could, but recomputing it +// is generally cheap enough that we prefer being confident about the properties of resolvedPath. +func NewReference(path string) (types.ImageReference, error) { + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path) + if err != nil { + return nil, err + } + return dirReference{path: path, resolvedPath: resolved}, nil +} + +func (ref dirReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. 
default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref dirReference) StringWithinTransport() string { + return ref.path +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref dirReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref dirReference) PolicyConfigurationIdentity() string { + return ref.resolvedPath +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref dirReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedPath + for { + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 || lastSlash == 0 { + break + } + path = path[:lastSlash] + res = append(res, path) + } + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by dirTransport.ValidatePolicyConfigurationScope above. + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src := newImageSource(ref) + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ref), nil +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. 
+func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for dir: images")
+}
+
+// manifestPath returns a path for the manifest within a directory using our conventions.
+func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string {
+	if instanceDigest != nil {
+		return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json")
+	}
+	return filepath.Join(ref.path, "manifest.json")
+}
+
+// layerPath returns a path for a layer tarball within a directory using our conventions.
+func (ref dirReference) layerPath(digest digest.Digest) string {
+	// FIXME: Should we keep the digest identification?
+	return filepath.Join(ref.path, digest.Encoded())
+}
+
+// signaturePath returns a path for a signature within a directory using our conventions.
+func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string {
+	if instanceDigest != nil {
+		return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1))
+	}
+	return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
+}
+
+// versionPath returns a path for the version file within a directory using our conventions.
+func (ref dirReference) versionPath() string {
+	return filepath.Join(ref.path, "version")
+}
diff --git a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
new file mode 100644
index 00000000000..71136b88089
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go
@@ -0,0 +1,56 @@
+package explicitfilepath
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
+// To do so, all elements of the input path must exist; as a special case, the final component may be
+// a non-existent name (but not a symlink pointing to a non-existent name).
+// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
+func ResolvePathToFullyExplicit(path string) (string, error) {
+	switch _, err := os.Lstat(path); {
+	case err == nil:
+		return resolveExistingPathToFullyExplicit(path)
+	case os.IsNotExist(err):
+		parent, file := filepath.Split(path)
+		resolvedParent, err := resolveExistingPathToFullyExplicit(parent)
+		if err != nil {
+			return "", err
+		}
+		if file == "." || file == ".." {
+			// Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well.
+			// This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
+			// We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
+			// in the resulting path, and especially not at the end.
+			return "", errors.Errorf("Unexpectedly missing special filename component in %s", path)
+		}
+		resolvedPath := filepath.Join(resolvedParent, file)
+		// As a sanity check, ensure that there are no "." or ".." components.
+ cleanedResolvedPath := filepath.Clean(resolvedPath) + if cleanedResolvedPath != resolvedPath { + // Coverage: This should never happen. + return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath) + } + return resolvedPath, nil + default: // err != nil, unrecognized + return "", err + } +} + +// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit, +// but without the special case for missing final component. +func resolveExistingPathToFullyExplicit(path string) (string, error) { + resolved, err := filepath.Abs(path) + if err != nil { + return "", err // Coverage: This can fail only if os.Getwd() fails. + } + resolved, err = filepath.EvalSymlinks(resolved) + if err != nil { + return "", err + } + return filepath.Clean(resolved), nil +} diff --git a/vendor/github.com/containers/image/v5/docker/archive/dest.go b/vendor/github.com/containers/image/v5/docker/archive/dest.go new file mode 100644 index 00000000000..d4248db21f0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/archive/dest.go @@ -0,0 +1,81 @@ +package archive + +import ( + "context" + "io" + + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +type archiveImageDestination struct { + *tarfile.Destination // Implements most of types.ImageDestination + ref archiveReference + archive *tarfile.Writer // Should only be closed if writer != nil + writer io.Closer // May be nil if the archive is shared +} + +func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) { + if ref.sourceIndex != -1 { + return nil, errors.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex) + } + + var archive *tarfile.Writer + var writer io.Closer + if ref.archiveWriter != nil { + archive = ref.archiveWriter + writer = nil + } else { + fh, err := openArchiveForWriting(ref.path) + if err != nil { + return nil, err + } + + archive = tarfile.NewWriter(fh) + writer = fh + } + tarDest := tarfile.NewDestination(sys, archive, ref.ref) + if sys != nil && sys.DockerArchiveAdditionalTags != nil { + tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags) + } + return &archiveImageDestination{ + Destination: tarDest, + ref: ref, + archive: archive, + writer: writer, + }, nil +} + +// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved +func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.Decompress +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *archiveImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *archiveImageDestination) Close() error { + if d.writer != nil { + return d.writer.Close() + } + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. 
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+	if d.writer != nil {
+		return d.archive.Close()
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/archive/reader.go b/vendor/github.com/containers/image/v5/docker/archive/reader.go
new file mode 100644
index 00000000000..4bb519a2622
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/archive/reader.go
@@ -0,0 +1,120 @@
+package archive
+
+import (
+	"github.com/containers/image/v5/docker/internal/tarfile"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+// Reader manages a single Docker archive, allows listing its contents and accessing
+// individual images with less overhead than creating image references individually
+// (because the archive is, if necessary, copied or decompressed only once).
+type Reader struct {
+	path    string // The original, user-specified path; not the maintained temporary file, if any
+	archive *tarfile.Reader
+}
+
+// NewReader returns a Reader for path.
+// The caller should call .Close() on the returned object.
+func NewReader(sys *types.SystemContext, path string) (*Reader, error) {
+	archive, err := tarfile.NewReaderFromFile(sys, path)
+	if err != nil {
+		return nil, err
+	}
+	return &Reader{
+		path:    path,
+		archive: archive,
+	}, nil
+}
+
+// Close deletes temporary files associated with the Reader, if any.
+func (r *Reader) Close() error {
+	return r.archive.Close()
+}
+
+// NewReaderForReference creates a Reader from a Reader-independent imageReference, which must be from docker/archive.Transport,
+// and a variant of imageReference that points at the same image within the reader.
+// The caller should call .Close() on the returned Reader.
+func NewReaderForReference(sys *types.SystemContext, ref types.ImageReference) (*Reader, types.ImageReference, error) {
+	standalone, ok := ref.(archiveReference)
+	if !ok {
+		return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref))
+	}
+	if standalone.archiveReader != nil {
+		return nil, nil, errors.Errorf("Internal error: NewReaderForReference called for a reader-bound reference %s", standalone.StringWithinTransport())
+	}
+	reader, err := NewReader(sys, standalone.path)
+	if err != nil {
+		return nil, nil, err
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			reader.Close()
+		}
+	}()
+	readerRef, err := newReference(standalone.path, standalone.ref, standalone.sourceIndex, reader.archive, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	succeeded = true
+	return reader, readerRef, nil
+}
+
+// List returns a set of references for images in the Reader,
+// grouped by the image the references point to.
+// The references are valid only until the Reader is closed.
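+// Each inner slice corresponds to a single image within the archive; an image with
+// no RepoTags is represented by a single @index (source index) reference.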
+func (r *Reader) List() ([][]types.ImageReference, error) { + res := [][]types.ImageReference{} + for imageIndex, image := range r.archive.Manifest { + refs := []types.ImageReference{} + for _, tag := range image.RepoTags { + parsedTag, err := reference.ParseNormalizedNamed(tag) + if err != nil { + return nil, errors.Wrapf(err, "Invalid tag %#v in manifest item @%d", tag, imageIndex) + } + nt, ok := parsedTag.(reference.NamedTagged) + if !ok { + return nil, errors.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String()) + } + ref, err := newReference(r.path, nt, -1, r.archive, nil) + if err != nil { + return nil, errors.Wrapf(err, "creating a reference for tag %#v in manifest item @%d", tag, imageIndex) + } + refs = append(refs, ref) + } + if len(refs) == 0 { + ref, err := newReference(r.path, nil, imageIndex, r.archive, nil) + if err != nil { + return nil, errors.Wrapf(err, "creating a reference for manifest item @%d", imageIndex) + } + refs = append(refs, ref) + } + res = append(res, refs) + } + return res, nil +} + +// ManifestTagsForReference returns the set of tags “matching” ref in reader, as strings +// (i.e. exposing the short names before normalization). +// The function reports an error if ref does not identify a single image. +// If ref contains a NamedTagged reference, only a single tag “matching” ref is returned; +// If ref contains a source index, or neither a NamedTagged nor a source index, all tags +// matching the image are returned. +// Almost all users should use List() or ImageReference.DockerReference() instead. +func (r *Reader) ManifestTagsForReference(ref types.ImageReference) ([]string, error) { + archiveRef, ok := ref.(archiveReference) + if !ok { + return nil, errors.Errorf("Internal error: ManifestTagsForReference called for a non-docker/archive ImageReference %s", transports.ImageName(ref)) + } + manifestItem, tagIndex, err := r.archive.ChooseManifestItem(archiveRef.ref, archiveRef.sourceIndex) + if err != nil { + return nil, err + } + if tagIndex != -1 { + return []string{manifestItem.RepoTags[tagIndex]}, nil + } + return manifestItem.RepoTags, nil +} diff --git a/vendor/github.com/containers/image/v5/docker/archive/src.go b/vendor/github.com/containers/image/v5/docker/archive/src.go new file mode 100644 index 00000000000..7acca210ef1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/archive/src.go @@ -0,0 +1,42 @@ +package archive + +import ( + "context" + + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/types" +) + +type archiveImageSource struct { + *tarfile.Source // Implements most of types.ImageSource + ref archiveReference +} + +// newImageSource returns a types.ImageSource for the specified image reference. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) { + var archive *tarfile.Reader + var closeArchive bool + if ref.archiveReader != nil { + archive = ref.archiveReader + closeArchive = false + } else { + a, err := tarfile.NewReaderFromFile(sys, ref.path) + if err != nil { + return nil, err + } + archive = a + closeArchive = true + } + src := tarfile.NewSource(archive, closeArchive, ref.ref, ref.sourceIndex) + return &archiveImageSource{ + Source: src, + ref: ref, + }, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). 
This can be used e.g. to determine which public keys are trusted for this image.
+func (s *archiveImageSource) Reference() types.ImageReference {
+	return s.ref
+}
diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go
new file mode 100644
index 00000000000..9a48cb46cc4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go
@@ -0,0 +1,211 @@
+package archive
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/containers/image/v5/docker/internal/tarfile"
+	"github.com/containers/image/v5/docker/reference"
+	ctrImage "github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for local Docker archives.
+var Transport = archiveTransport{}
+
+type archiveTransport struct{}
+
+func (t archiveTransport) Name() string {
+	return "docker-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// See the explanation in archiveReference.PolicyConfigurationIdentity.
+	return errors.New(`docker-archive: does not support any scopes except the default "" one`)
+}
+
+// archiveReference is an ImageReference for Docker images.
+type archiveReference struct {
+	path string
+	// May be nil to read the only image in an archive, or to create an untagged image.
+	ref reference.NamedTagged
+	// If not -1, a zero-based index of the image in the manifest. Valid only for sources.
+	// Must not be set if ref is set.
+	sourceIndex int
+	// If not nil, must have been created from path (but archiveReader.path may point at a temporary
+	// file, not necessarily path precisely).
+	archiveReader *tarfile.Reader
+	// If not nil, must have been created for path
+	archiveWriter *tarfile.Writer
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+	if refString == "" {
+		return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
+	}
+
+	parts := strings.SplitN(refString, ":", 2)
+	path := parts[0]
+	var nt reference.NamedTagged
+	sourceIndex := -1
+
+	if len(parts) == 2 {
+		// A :tag or :@index was specified.
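+		// e.g. for "archive.tar:docker.io/library/busybox:latest", parts[1] is
+		// "docker.io/library/busybox:latest"; for "archive.tar:@0", parts[1] is "@0".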
+ if len(parts[1]) > 0 && parts[1][0] == '@' { + i, err := strconv.Atoi(parts[1][1:]) + if err != nil { + return nil, errors.Wrapf(err, "Invalid source index %s", parts[1]) + } + if i < 0 { + return nil, errors.Errorf("Invalid source index @%d: must not be negative", i) + } + sourceIndex = i + } else { + ref, err := reference.ParseNormalizedNamed(parts[1]) + if err != nil { + return nil, errors.Wrapf(err, "docker-archive parsing reference") + } + ref = reference.TagNameOnly(ref) + refTagged, isTagged := ref.(reference.NamedTagged) + if !isTagged { // If ref contains a digest, TagNameOnly does not change it + return nil, errors.Errorf("reference does not include a tag: %s", ref.String()) + } + nt = refTagged + } + } + + return newReference(path, nt, sourceIndex, nil, nil) +} + +// NewReference returns a Docker archive reference for a path and an optional reference. +func NewReference(path string, ref reference.NamedTagged) (types.ImageReference, error) { + return newReference(path, ref, -1, nil, nil) +} + +// NewIndexReference returns a Docker archive reference for a path and a zero-based source manifest index. +func NewIndexReference(path string, sourceIndex int) (types.ImageReference, error) { + return newReference(path, nil, sourceIndex, nil, nil) +} + +// newReference returns a docker archive reference for a path, an optional reference or sourceIndex, +// and optionally a tarfile.Reader and/or a tarfile.Writer matching path. +func newReference(path string, ref reference.NamedTagged, sourceIndex int, + archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) { + if strings.Contains(path, ":") { + return nil, errors.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path) + } + if ref != nil && sourceIndex != -1 { + return nil, errors.Errorf("Invalid docker-archive: reference: cannot use both a tag and a source index") + } + if _, isDigest := ref.(reference.Canonical); isDigest { + return nil, errors.Errorf("docker-archive doesn't support digest references: %s", ref.String()) + } + if sourceIndex != -1 && sourceIndex < 0 { + return nil, errors.Errorf("Invalid docker-archive: reference: index @%d must not be negative", sourceIndex) + } + return archiveReference{ + path: path, + ref: ref, + sourceIndex: sourceIndex, + archiveReader: archiveReader, + archiveWriter: archiveWriter, + }, nil +} + +func (ref archiveReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref archiveReference) StringWithinTransport() string { + switch { + case ref.ref != nil: + return fmt.Sprintf("%s:%s", ref.path, ref.ref.String()) + case ref.sourceIndex != -1: + return fmt.Sprintf("%s:@%d", ref.path, ref.sourceIndex) + default: + return ref.path + } +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. 
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref archiveReference) DockerReference() reference.Named {
+	return ref.ref
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref archiveReference) PolicyConfigurationIdentity() string {
+	// Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity.
+	return ""
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref archiveReference) PolicyConfigurationNamespaces() []string {
+	// TODO
+	return []string{}
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	src, err := newImageSource(ctx, sys, ref)
+	if err != nil {
+		return nil, err
+	}
+	return ctrImage.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ctx, sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref archiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref archiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	// Not really supported, for safety reasons.
+	return errors.New("Deleting images not implemented for docker-archive: images")
+}
diff --git a/vendor/github.com/containers/image/v5/docker/archive/writer.go b/vendor/github.com/containers/image/v5/docker/archive/writer.go
new file mode 100644
index 00000000000..6a4b8c645a1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/archive/writer.go
@@ -0,0 +1,82 @@
+package archive
+
+import (
+	"io"
+	"os"
+
+	"github.com/containers/image/v5/docker/internal/tarfile"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+// Writer manages a single in-progress Docker archive and allows adding images to it.
+type Writer struct {
+	path    string // The original, user-specified path; not the maintained temporary file, if any
+	archive *tarfile.Writer
+	writer  io.Closer
+}
+
+// NewWriter returns a Writer for path.
+// The caller should call .Close() on the returned object.
+func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
+	fh, err := openArchiveForWriting(path)
+	if err != nil {
+		return nil, err
+	}
+	archive := tarfile.NewWriter(fh)
+
+	return &Writer{
+		path:    path,
+		archive: archive,
+		writer:  fh,
+	}, nil
+}
+
+// Close writes all outstanding data about images to the archive, and
+// releases state associated with the Writer, if any.
+// No more images can be added after this is called.
+func (w *Writer) Close() error {
+	err := w.archive.Close()
+	if err2 := w.writer.Close(); err2 != nil && err == nil {
+		err = err2
+	}
+	return err
+}
+
+// NewReference returns an ImageReference that allows adding an image to Writer,
+// with an optional reference.
+func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
+	return newReference(w.path, destinationRef, -1, nil, w.archive)
+}
+
+// openArchiveForWriting opens path for writing a tar archive,
+// making a few sanity checks.
+func openArchiveForWriting(path string) (*os.File, error) {
+	// path can be either a pipe or a regular file
+	// in the case of a pipe, we require that we can open it for write
+	// in the case of a regular file, we don't want to overwrite any pre-existing file
+	// so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
+	// only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
+	fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
+	if err != nil {
+		return nil, errors.Wrapf(err, "opening file %q", path)
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			fh.Close()
+		}
+	}()
+	fhStat, err := fh.Stat()
+	if err != nil {
+		return nil, errors.Wrapf(err, "statting file %q", path)
+	}
+
+	if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
+		return nil, errors.New("docker-archive doesn't support modifying existing images")
+	}
+
+	succeeded = true
+	return fh, nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go
new file mode 100644
index 00000000000..323a02fc095
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go
@@ -0,0 +1,85 @@
+package daemon
+
+import (
+	"net/http"
+	"path/filepath"
+
+	"github.com/containers/image/v5/types"
+	dockerclient "github.com/docker/docker/client"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+const (
+	// The default API version to be used in case none is explicitly specified
+	defaultAPIVersion = "1.22"
+)
+
+// newDockerClient initializes a new API client based on the passed SystemContext.
+func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
+	host := dockerclient.DefaultDockerHost
+	if sys != nil && sys.DockerDaemonHost != "" {
+		host = sys.DockerDaemonHost
+	}
+
+	// Sadly, unix:// sockets don't work transparently with dockerclient.NewClient.
+	// They work fine with a nil httpClient; with a non-nil httpClient, the transport’s
+	// TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket
+	// regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport.
+	//
+	// We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client.
+	//
+	// Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set
+	// TLSClientConfig to nil. This can be achieved by using the form `http://`
+	url, err := dockerclient.ParseHostURL(host)
+	if err != nil {
+		return nil, err
+	}
+	var httpClient *http.Client
+	if url.Scheme != "unix" {
+		if url.Scheme == "http" {
+			httpClient = httpConfig()
+		} else {
+			hc, err := tlsConfig(sys)
+			if err != nil {
+				return nil, err
+			}
+			httpClient = hc
+		}
+	}
+
+	return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil)
+}
+
+func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
+	options := tlsconfig.Options{}
+	if sys != nil && sys.DockerDaemonInsecureSkipTLSVerify {
+		options.InsecureSkipVerify = true
+	}
+
+	if sys != nil && sys.DockerDaemonCertPath != "" {
+		options.CAFile = filepath.Join(sys.DockerDaemonCertPath, "ca.pem")
+		options.CertFile = filepath.Join(sys.DockerDaemonCertPath, "cert.pem")
+		options.KeyFile = filepath.Join(sys.DockerDaemonCertPath, "key.pem")
+	}
+
+	tlsc, err := tlsconfig.Client(options)
+	if err != nil {
+		return nil, err
+	}
+
+	return &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: tlsc,
+		},
+		CheckRedirect: dockerclient.CheckRedirect,
+	}, nil
+}
+
+func httpConfig() *http.Client {
+	return &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: nil,
+		},
+		CheckRedirect: dockerclient.CheckRedirect,
+	}
+}
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
new file mode 100644
index 00000000000..f68981472f3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go
@@ -0,0 +1,154 @@
+package daemon
+
+import (
+	"context"
+	"io"
+
+	"github.com/containers/image/v5/docker/internal/tarfile"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/types"
+	"github.com/docker/docker/client"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type daemonImageDestination struct {
+	ref                  daemonReference
+	mustMatchRuntimeOS   bool
+	*tarfile.Destination // Implements most of types.ImageDestination
+	archive              *tarfile.Writer
+	// For talking to imageLoadGoroutine
+	goroutineCancel context.CancelFunc
+	statusChannel   <-chan error
+	writer          *io.PipeWriter
+	// Other state
+	committed bool // writer has been closed
+}
+
+// newImageDestination returns a types.ImageDestination for the specified image reference.
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
+	if ref.ref == nil {
+		return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+	}
+	namedTaggedRef, ok := ref.ref.(reference.NamedTagged)
+	if !ok {
+		return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+	}
+
+	var mustMatchRuntimeOS = true
+	if sys != nil && sys.DockerDaemonHost != client.DefaultDockerHost {
+		mustMatchRuntimeOS = false
+	}
+
+	c, err := newDockerClient(sys)
+	if err != nil {
+		return nil, errors.Wrap(err, "initializing docker engine client")
+	}
+
+	reader, writer := io.Pipe()
+	archive := tarfile.NewWriter(writer)
+	// Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it.
+	statusChannel := make(chan error, 1)
+
+	goroutineContext, goroutineCancel := context.WithCancel(ctx)
+	go imageLoadGoroutine(goroutineContext, c, reader, statusChannel)
+
+	return &daemonImageDestination{
+		ref:                ref,
+		mustMatchRuntimeOS: mustMatchRuntimeOS,
+		Destination:        tarfile.NewDestination(sys, archive, namedTaggedRef),
+		archive:            archive,
+		goroutineCancel:    goroutineCancel,
+		statusChannel:      statusChannel,
+		writer:             writer,
+		committed:          false,
+	}, nil
+}
+
+// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel
+func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) {
+	err := errors.New("Internal error: unexpected panic in imageLoadGoroutine")
+	defer func() {
+		logrus.Debugf("docker-daemon: sending done, status %v", err)
+		statusChannel <- err
+	}()
+	defer func() {
+		if err == nil {
+			reader.Close()
+		} else {
+			if err := reader.CloseWithError(err); err != nil {
+				logrus.Debugf("imageLoadGoroutine: Error during reader.CloseWithError: %v", err)
+			}
+		}
+	}()
+
+	resp, err := c.ImageLoad(ctx, reader, true)
+	if err != nil {
+		err = errors.Wrap(err, "saving image to docker engine")
+		return
+	}
+	defer resp.Body.Close()
+}
+
+// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
+func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompression {
+	return types.PreserveOriginal
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (d *daemonImageDestination) MustMatchRuntimeOS() bool {
+	return d.mustMatchRuntimeOS
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *daemonImageDestination) Close() error {
+	if !d.committed {
+		logrus.Debugf("docker-daemon: Closing tar stream to abort loading")
+		// In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing.
+		// In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including
+		// https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the
+		// net/http version with native Context support in Go 1.7) do not always actually immediately cancel
+		// the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and
+		// return early if the context is canceled without terminating the goroutine at all.
+		// So we need this CloseWithError to terminate sending the HTTP request Body
+		// immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending
+		// the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
+		// Whether that works or not, closing the PipeWriter seems desirable in any case.
+		if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()")); err != nil {
+			return err
+		}
+	}
+	d.goroutineCancel()
+
+	return nil
+}
+
+func (d *daemonImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+	logrus.Debugf("docker-daemon: Closing tar stream")
+	if err := d.archive.Close(); err != nil {
+		return err
+	}
+	if err := d.writer.Close(); err != nil {
+		return err
+	}
+	d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine.
+
+	logrus.Debugf("docker-daemon: Waiting for status")
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case err := <-d.statusChannel:
+		return err
+	}
+}
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
new file mode 100644
index 00000000000..a6d8a6cf587
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go
@@ -0,0 +1,53 @@
+package daemon
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/docker/internal/tarfile"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+type daemonImageSource struct {
+	ref             daemonReference
+	*tarfile.Source // Implements most of types.ImageSource
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+//
+// It would be great if we were able to stream the input tar as it is being
+// sent; but Docker sends the top-level manifest, which determines which paths
+// to look for, at the end, so we will need to seek back and re-read, several times.
+// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
+// is the config, and that the following len(RootFS) files are the layers, but that feels
+// way too brittle.)
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
+	c, err := newDockerClient(sys)
+	if err != nil {
+		return nil, errors.Wrap(err, "initializing docker engine client")
+	}
+	// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
+	// Either way, ImageSave should create a tarball with exactly one image.
+	inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
+	if err != nil {
+		return nil, errors.Wrap(err, "loading image from docker engine")
+	}
+	defer inputStream.Close()
+
+	archive, err := tarfile.NewReaderFromStream(sys, inputStream)
+	if err != nil {
+		return nil, err
+	}
+	src := tarfile.NewSource(archive, true, nil, -1)
+	return &daemonImageSource{
+		ref:    ref,
+		Source: src,
+	}, nil
+}
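A hedged end-to-end sketch of how the daemon source above and the archive writer are typically consumed together, assuming the public copy and signature packages from the same module; the image name, output path, and accept-anything policy are illustrative only:

```go
package main

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/docker/archive"
	"github.com/containers/image/v5/docker/daemon"
	"github.com/containers/image/v5/signature"
)

func main() {
	ctx := context.Background()

	// Source: an image held by the local Docker daemon (streamed via ImageSave).
	srcRef, err := daemon.ParseReference("busybox:latest")
	if err != nil {
		panic(err)
	}
	// Destination: a docker-save-style archive, tagged inside the tar.
	dstRef, err := archive.ParseReference("/tmp/busybox.tar:busybox:latest")
	if err != nil {
		panic(err)
	}

	// Accept-anything policy, for illustration only; real callers should be stricter.
	pc, err := signature.NewPolicyContext(&signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
	})
	if err != nil {
		panic(err)
	}
	defer pc.Destroy()

	if _, err := copy.Image(ctx, pc, dstRef, srcRef, nil); err != nil {
		panic(err)
	}
}
```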
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *daemonImageSource) Reference() types.ImageReference {
+	return s.ref
+}
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
new file mode 100644
index 00000000000..4e4ed688148
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go
@@ -0,0 +1,223 @@
+package daemon
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/image/v5/docker/policyconfiguration"
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for images managed by a local Docker daemon.
+var Transport = daemonTransport{}
+
+type daemonTransport struct{}
+
+// Name returns the name of the transport, which must be unique among other transports.
+func (t daemonTransport) Name() string {
+	return "docker-daemon"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// ID values cannot be effectively namespaced, and are clearly invalid host:port values.
+	if _, err := digest.Parse(scope); err == nil {
+		return errors.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope)
+	}
+
+	// FIXME? We could be verifying the various character set and length restrictions
+	// from docker/distribution/reference.regexp.go, but other than that there
+	// are few semantically invalid strings.
+	return nil
+}
+
+// daemonReference is an ImageReference for images managed by a local Docker daemon
+// Exactly one of id and ref can be set.
+// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon)
+// For daemonImageDestination, it must be a ref, which is NamedTagged.
+// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest.
+// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.)
+type daemonReference struct {
+	id  digest.Digest
+	ref reference.Named // !reference.IsNameOnly
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+	// This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases.
+	// In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars).
+
+	// digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag).
+	// reference.ParseAnyReference interprets such strings as digests.
+	if dgst, err := digest.Parse(refString); err == nil {
+		// The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name.
+		// Other digest references are ambiguous, so refuse them.
+		if dgst.Algorithm() != digest.Canonical {
+			return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
+		}
+		return NewReference(dgst, nil)
+	}
+
+	ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values
+	if err != nil {
+		return nil, err
+	}
+	if reference.FamiliarName(ref) == digest.Canonical.String() {
+		return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
+	}
+	return NewReference("", ref)
+}
+
+// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly)
+func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) {
+	if id != "" && ref != nil {
+		return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time")
+	}
+	if ref != nil {
+		if reference.IsNameOnly(ref) {
+			return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+		}
+		// A github.com/distribution/reference value can have a tag and a digest at the same time!
+		// Most versions of docker/reference do not handle that (ignoring the tag), so reject such input.
+		// This MAY be accepted in the future.
+		// (Even if it were supported, the semantics of policy namespaces are unclear - should we drop
+		// the tag or the digest first?)
+		_, isTagged := ref.(reference.NamedTagged)
+		_, isDigested := ref.(reference.Canonical)
+		if isTagged && isDigested {
+			return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
+		}
+	}
+	return daemonReference{
+		id:  id,
+		ref: ref,
+	}, nil
+}
+
+func (ref daemonReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
+// instead, see transports.ImageName().
+func (ref daemonReference) StringWithinTransport() string {
+	switch {
+	case ref.id != "":
+		return ref.id.String()
+	case ref.ref != nil:
+		return reference.FamiliarString(ref.ref)
+	default: // Coverage: Should never happen, NewReference above should refuse such values.
+		panic("Internal inconsistency: daemonReference has empty id and nil ref")
+	}
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref daemonReference) DockerReference() reference.Named {
+	return ref.ref // May be nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref daemonReference) PolicyConfigurationIdentity() string {
+	// We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible.
+	// But the existence of image IDs means that we can’t truly well namespace the input:
+	// a single image can be namespaced either using the name or the ID depending on how it is named.
+	//
+	// That’s fairly unexpected, but we have to cope somehow.
+	//
+	// So, use the ordinary docker/policyconfiguration namespacing for named images.
+	// Image IDs all fall into the root namespace.
+	// Users can set up the root namespace to be either untrusted or rejected,
+	// and to set up specific trust for named namespaces. This allows verifying image
+	// identity when a name is known, and unnamed images would be untrusted or rejected.
+	switch {
+	case ref.id != "":
+		return "" // This still allows using the default "" scope to define a global policy for ID-identified images.
+	case ref.ref != nil:
+		res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
+		if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
+			panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
+		}
+		return res
+	default: // Coverage: Should never happen, NewReference above should refuse such values.
+		panic("Internal inconsistency: daemonReference has empty id and nil ref")
+	}
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref daemonReference) PolicyConfigurationNamespaces() []string {
+	// See the explanation in daemonReference.PolicyConfigurationIdentity.
+	switch {
+	case ref.id != "":
+		return []string{}
+	case ref.ref != nil:
+		return policyconfiguration.DockerReferenceNamespaces(ref.ref)
+	default: // Coverage: Should never happen, NewReference above should refuse such values.
+		panic("Internal inconsistency: daemonReference has empty id and nil ref")
+	}
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	src, err := newImageSource(ctx, sys, ref)
+	if err != nil {
+		return nil, err
+	}
+	return image.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref daemonReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ctx, sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref daemonReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(ctx, sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	// Should this just untag the image? Should this stop running containers?
+	// The semantics are not quite as clear as for remote repositories.
+	// The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant.
+	return errors.Errorf("Deleting images not implemented for docker-daemon: images")
+}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go
index 9837235d838..f537de72da3 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_client.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_client.go
@@ -1,13 +1,11 @@
 package docker
 
 import (
-	"bytes"
 	"context"
 	"crypto/tls"
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
@@ -654,7 +652,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
 	params.Add("refresh_token", c.auth.IdentityToken)
 	params.Add("client_id", "containers/image")
 
-	authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
+	authReq.Body = io.NopCloser(strings.NewReader(params.Encode()))
 	authReq.Header.Add("User-Agent", c.userAgent)
 	authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
 	logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
index e3275aa457b..d02100cf80b 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go
@@ -7,7 +7,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
@@ -592,7 +591,7 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte)
 	if err != nil {
 		return err
 	}
-	err = ioutil.WriteFile(url.Path, signature, 0644)
+	err = os.WriteFile(url.Path, signature, 0644)
 	if err != nil {
 		return err
 	}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index c08e5538a14..085a3afcc5c 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"mime"
 	"mime/multipart"
 	"net/http"
@@ -308,7 +307,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
 			break
 		}
 		toSkip := c.Offset - currentOffset
-		if _, err := io.Copy(ioutil.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
+		if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
 			errs <- err
 			break
 		}
@@ -316,7 +315,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
 	}
 	s := signalCloseReader{
 		closed:        make(chan interface{}),
-		stream:        ioutil.NopCloser(io.LimitReader(body, int64(c.Length))),
+		stream:        io.NopCloser(io.LimitReader(body, int64(c.Length))),
 		consumeStream: true,
 	}
 	streams <- s
@@ -515,7 +514,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
 	switch url.Scheme {
 	case "file":
 		logrus.Debugf("Reading %s", url.Path)
-		sig, err := ioutil.ReadFile(url.Path)
+		sig, err := os.ReadFile(url.Path)
 		if err != nil {
 			if os.IsNotExist(err) {
 				return nil, true, nil
@@ -611,8 +610,11 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
 		return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
 	}
 
-	digest := get.Header.Get("Docker-Content-Digest")
-	deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest)
+	manifestDigest, err := manifest.Digest(manifestBody)
+	if err != nil {
+		return fmt.Errorf("computing manifest digest: %w", err)
+	}
+	deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), manifestDigest)
 
 	// When retrieving the digest from a registry >= 2.3 use the following header:
 	//   "Accept": "application/vnd.docker.distribution.manifest.v2+json"
@@ -630,11 +632,6 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
 		return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
 	}
 
-	manifestDigest, err := manifest.Digest(manifestBody)
-	if err != nil {
-		return err
-	}
-
 	for i := 0; ; i++ {
 		url := signatureStorageURL(c.signatureBase, manifestDigest, i)
 		missing, err := c.deleteOneSignature(url)
@@ -765,7 +762,7 @@ func (s signalCloseReader) Read(p []byte) (int, error) {
 func (s signalCloseReader) Close() error {
 	defer close(s.closed)
 	if s.consumeStream {
-		if _, err := io.Copy(ioutil.Discard, s.stream); err != nil {
+		if _, err := io.Copy(io.Discard, s.stream); err != nil {
 			s.stream.Close()
 			return err
 		}
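The deleteImage hunk above now derives the digest from the manifest bytes it already fetched instead of trusting the registry-supplied Docker-Content-Digest header. A minimal sketch of that primitive (the manifest bytes here are a stand-in):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	body := []byte(`{"schemaVersion": 2}`) // stand-in for fetched manifest bytes
	dgst, err := manifest.Digest(body)     // canonical sha256 over the exact bytes
	if err != nil {
		panic(err)
	}
	fmt.Println(dgst)
}
```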
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
new file mode 100644
index 00000000000..7e1580990fd
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
@@ -0,0 +1,200 @@
+package tarfile
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"io"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
+	"github.com/containers/image/v5/internal/streamdigest"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
+type Destination struct {
+	archive  *Writer
+	repoTags []reference.NamedTagged
+	// Other state.
+	config []byte
+	sysCtx *types.SystemContext
+}
+
+// NewDestination returns a tarfile.Destination adding images to the specified Writer.
+func NewDestination(sys *types.SystemContext, archive *Writer, ref reference.NamedTagged) *Destination {
+	repoTags := []reference.NamedTagged{}
+	if ref != nil {
+		repoTags = append(repoTags, ref)
+	}
+	return &Destination{
+		archive:  archive,
+		repoTags: repoTags,
+		sysCtx:   sys,
+	}
+}
+
+// AddRepoTags adds the specified tags to the destination's repoTags.
+func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
+	d.repoTags = append(d.repoTags, tags...)
+}
+
+// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+// If an empty slice or nil is returned, then any MIME type can be tried for upload.
+func (d *Destination) SupportedManifestMIMETypes() []string {
+	return []string{
+		manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+	}
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *Destination) SupportsSignatures(ctx context.Context) error {
+	return errors.Errorf("Storing signatures for docker tar files is not supported")
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *Destination) AcceptsForeignLayerURLs() bool {
+	return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+func (d *Destination) MustMatchRuntimeOS() bool {
+	return false
+}
+
+// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+// Does not make a difference if Reference().DockerReference() is nil.
+func (d *Destination) IgnoresEmbeddedDockerReference() bool {
+	return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (d *Destination) HasThreadSafePutBlob() bool {
+	// The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where
+	// this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t
+	// much benefit from concurrency, mostly just extra CPU, memory and I/O contention.
+	return false
+}
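PutBlob (below) only spools a blob to a temporary file when its size or digest is unknown; a caller that already holds the bytes can precompute both and skip that path. A sketch, assuming in-memory data:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

func main() {
	data := []byte("layer bytes") // stand-in for a layer tarball held in memory
	info := types.BlobInfo{
		Digest: digest.FromBytes(data), // must match the stream contents exactly
		Size:   int64(len(data)),
	}
	// Passing a fully filled-in BlobInfo to PutBlob lets it skip the
	// streamdigest spool-to-disk branch entirely.
	fmt.Println(info.Digest, info.Size)
}
```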
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	// Ouch, we need to stream the blob into a temporary file just to determine the size.
+	// When the layer is decompressed, we also have to generate the digest on uncompressed data.
+	if inputInfo.Size == -1 || inputInfo.Digest == "" {
+		logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
+		streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		defer cleanup()
+		stream = streamCopy
+		logrus.Debugf("... streaming done")
+	}
+
+	if err := d.archive.lock(); err != nil {
+		return types.BlobInfo{}, err
+	}
+	defer d.archive.unlock()
+
+	// Maybe the blob has been already sent
+	ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	if ok {
+		return reusedInfo, nil
+	}
+
+	if isConfig {
+		buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
+		if err != nil {
+			return types.BlobInfo{}, errors.Wrap(err, "reading Config file stream")
+		}
+		d.config = buf
+		if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
+			return types.BlobInfo{}, errors.Wrap(err, "writing Config file")
+		}
+	} else {
+		if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
+			return types.BlobInfo{}, err
+		}
+	}
+	d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size})
+	return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if err := d.archive.lock(); err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	defer d.archive.unlock()
+
+	return d.archive.tryReusingBlobLocked(info)
+}
+
+// PutManifest writes manifest to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+	if instanceDigest != nil {
+		return errors.New(`Manifest lists are not supported for docker tar files`)
+	}
+	// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
+	// so the caller trying a different manifest kind would be pointless.
+	var man manifest.Schema2
+	if err := json.Unmarshal(m, &man); err != nil {
+		return errors.Wrap(err, "parsing manifest")
+	}
+	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
+		return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
+	}
+
+	if err := d.archive.lock(); err != nil {
+		return err
+	}
+	defer d.archive.unlock()
+
+	if err := d.archive.writeLegacyMetadataLocked(man.LayersDescriptors, d.config, d.repoTags); err != nil {
+		return err
+	}
+
+	return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags)
+}
+
+// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
+func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+	if instanceDigest != nil {
+		return errors.Errorf(`Manifest lists are not supported for docker tar files`)
+	}
+	if len(signatures) != 0 {
+		return errors.Errorf("Storing signatures for docker tar files is not supported")
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
new file mode 100644
index 00000000000..c77c002d15e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go
@@ -0,0 +1,268 @@
+package tarfile
+
+import (
+	"archive/tar"
+	"encoding/json"
+	"io"
+	"os"
+	"path"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
+	"github.com/containers/image/v5/internal/tmpdir"
+	"github.com/containers/image/v5/pkg/compression"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+// Reader is a ((docker save)-formatted) tar archive that allows random access to any component.
+type Reader struct {
+	// None of the fields below are modified after the archive is created, until .Close();
+	// this allows concurrent readers of the same archive.
+	path          string         // "" if the archive has already been closed.
+	removeOnClose bool           // Remove file on close if true
+	Manifest      []ManifestItem // Guaranteed to exist after the archive is created.
+}
+
+// NewReaderFromFile returns a Reader for the specified path.
+// The caller should call .Close() on the returned archive when done.
+func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, errors.Wrapf(err, "opening file %q", path)
+	}
+	defer file.Close()
+
+	// If the file is not compressed, we can just return the file itself
+	// as a source. Otherwise we pass the stream to NewReaderFromStream.
+	stream, isCompressed, err := compression.AutoDecompress(file)
+	if err != nil {
+		return nil, errors.Wrapf(err, "detecting compression for file %q", path)
+	}
+	defer stream.Close()
+	if !isCompressed {
+		return newReader(path, false)
+	}
+	return NewReaderFromStream(sys, stream)
+}
+
+// NewReaderFromStream returns a Reader for the specified inputStream,
+// which can be either compressed or uncompressed. The caller can close the
+// inputStream immediately after NewReaderFromStream returns.
+// The caller should call .Close() on the returned archive when done.
+func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
+	// Save inputStream to a temporary file
+	tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
+	if err != nil {
+		return nil, errors.Wrap(err, "creating temporary file")
+	}
+	defer tarCopyFile.Close()
+
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			os.Remove(tarCopyFile.Name())
+		}
+	}()
+
+	// In order to be compatible with docker-load, we need to support
+	// auto-decompression (it's also a nice quality-of-life thing to avoid
+	// giving users really confusing "invalid tar header" errors).
+	uncompressedStream, _, err := compression.AutoDecompress(inputStream)
+	if err != nil {
+		return nil, errors.Wrap(err, "auto-decompressing input")
+	}
+	defer uncompressedStream.Close()
+
+	// Copy the plain archive to the temporary file.
+	//
+	// TODO: This can take quite some time, and should ideally be cancellable
+	// using a context.Context.
+	if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
+		return nil, errors.Wrapf(err, "copying contents to temporary file %q", tarCopyFile.Name())
+	}
+	succeeded = true
+
+	return newReader(tarCopyFile.Name(), true)
+}
+
+// newReader creates a Reader for the specified path and removeOnClose flag.
+// The caller should call .Close() on the returned archive when done.
+func newReader(path string, removeOnClose bool) (*Reader, error) {
+	// This is a valid enough archive, except Manifest is not yet filled.
+	r := Reader{
+		path:          path,
+		removeOnClose: removeOnClose,
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			r.Close()
+		}
+	}()
+
+	// We initialize Manifest immediately when constructing the Reader instead
+	// of later on-demand because every caller will need the data, and because doing it now
+	// removes the need to synchronize the access/creation of the data if the archive is later
+	// used from multiple goroutines to access different images.
+
+	// FIXME? Do we need to deal with the legacy format?
+	bytes, err := r.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
+	if err != nil {
+		return nil, err
+	}
+	if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
+		return nil, errors.Wrap(err, "decoding tar manifest.json")
+	}
+
+	succeeded = true
+	return &r, nil
+}
+
+// Close removes resources associated with an initialized Reader, if any.
+func (r *Reader) Close() error {
+	path := r.path
+	r.path = "" // Mark the archive as closed
+	if r.removeOnClose {
+		return os.Remove(path)
+	}
+	return nil
+}
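The `:tag` and `:@index` selectors are ultimately resolved against manifest.json by ChooseManifestItem below. From outside this internal package, multi-image archives can be enumerated through the public docker/archive wrapper; a hedged sketch, assuming the NewReader/List API exists in this version of the module as I recall it (the archive path is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/archive"
)

func main() {
	r, err := archive.NewReader(nil, "/tmp/multi.tar") // may hold several images
	if err != nil {
		panic(err)
	}
	defer r.Close()

	imgs, err := r.List() // one slice of references per manifest.json item
	if err != nil {
		panic(err)
	}
	for i, refs := range imgs {
		for _, ref := range refs {
			fmt.Printf("@%d: %s\n", i, ref.StringWithinTransport())
		}
	}
}
```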
+
+// ChooseManifestItem selects a manifest item from r.Manifest matching (ref, sourceIndex), one or
+// both of which should be (nil, -1).
+// On success, it returns the manifest item and an index of the matching tag, if a tag was used
+// for matching; the index is -1 if a tag was not used.
+func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) {
+	switch {
+	case ref != nil && sourceIndex != -1:
+		return nil, -1, errors.Errorf("Internal error: Cannot have both ref %s and source index @%d",
+			ref.String(), sourceIndex)
+
+	case ref != nil:
+		refString := ref.String()
+		for i := range r.Manifest {
+			for tagIndex, tag := range r.Manifest[i].RepoTags {
+				parsedTag, err := reference.ParseNormalizedNamed(tag)
+				if err != nil {
+					return nil, -1, errors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i)
+				}
+				if parsedTag.String() == refString {
+					return &r.Manifest[i], tagIndex, nil
+				}
+			}
+		}
+		return nil, -1, errors.Errorf("Tag %#v not found", refString)
+
+	case sourceIndex != -1:
+		if sourceIndex >= len(r.Manifest) {
+			return nil, -1, errors.Errorf("Invalid source index @%d, only %d manifest items available",
+				sourceIndex, len(r.Manifest))
+		}
+		return &r.Manifest[sourceIndex], -1, nil
+
+	default:
+		if len(r.Manifest) != 1 {
+			return nil, -1, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
+		}
+		return &r.Manifest[0], -1, nil
+	}
+}
+
+// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
+type tarReadCloser struct {
+	*tar.Reader
+	backingFile *os.File
+}
+
+func (t *tarReadCloser) Close() error {
+	return t.backingFile.Close()
+}
+
+// openTarComponent returns a ReadCloser for the specific file within the archive.
+// This is a linear scan; we assume that the tar file will have a fairly small amount of files (~layers),
+// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
+// It is safe to call this method from multiple goroutines simultaneously.
+// The caller should call .Close() on the returned stream.
+func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) {
+	// This is only a sanity check; if anyone did concurrently close r, this access is technically
+	// racy against the write in .Close().
+	if r.path == "" {
+		return nil, errors.New("Internal error: trying to read an already closed tarfile.Reader")
+	}
+
+	f, err := os.Open(r.path)
+	if err != nil {
+		return nil, err
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			f.Close()
+		}
+	}()
+
+	tarReader, header, err := findTarComponent(f, componentPath)
+	if err != nil {
+		return nil, err
+	}
+	if header == nil {
+		return nil, os.ErrNotExist
+	}
+	if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
+		// We follow only one symlink; so no loops are possible.
+		if _, err := f.Seek(0, io.SeekStart); err != nil {
+			return nil, err
+		}
+		// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
+		// so we don't care.
+		tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
+		if err != nil {
+			return nil, err
+		}
+		if header == nil {
+			return nil, os.ErrNotExist
+		}
+	}
+
+	if !header.FileInfo().Mode().IsRegular() {
+		return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+	}
+	succeeded = true
+	return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
+}
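findTarComponent (below) compares path.Clean'ed names, since different tools record the same tar entry as `manifest.json`, `./manifest.json`, or with redundant separators. A small illustration of the normalization being relied on:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// All three normalize to the same component name.
	for _, n := range []string{"manifest.json", "./manifest.json", "foo/../manifest.json"} {
		fmt.Println(path.Clean(n)) // prints "manifest.json" each time
	}
}
```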
+
+// findTarComponent returns a header and a reader matching componentPath within inputFile,
+// or (nil, nil, nil) if not found.
+func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *tar.Header, error) {
+	t := tar.NewReader(inputFile)
+	componentPath = path.Clean(componentPath)
+	for {
+		h, err := t.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+		if path.Clean(h.Name) == componentPath {
+			return t, h, nil
+		}
+	}
+	return nil, nil, nil
+}
+
+// readTarComponent returns full contents of componentPath.
+// It is safe to call this method from multiple goroutines simultaneously.
+func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
+	file, err := r.openTarComponent(path)
+	if err != nil {
+		return nil, errors.Wrapf(err, "loading tar component %s", path)
+	}
+	defer file.Close()
+	bytes, err := iolimits.ReadAtMost(file, limit)
+	if err != nil {
+		return nil, err
+	}
+	return bytes, nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
new file mode 100644
index 00000000000..8e9be17c189
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go
@@ -0,0 +1,330 @@
+package tarfile
+
+import (
+	"archive/tar"
+	"bytes"
+	"context"
+	"encoding/json"
+	"io"
+	"os"
+	"path"
+	"sync"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/pkg/compression"
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// Source is a partial implementation of types.ImageSource for reading from tarPath.
+type Source struct {
+	archive      *Reader
+	closeArchive bool // .Close() the archive when the source is closed.
+	// If ref is nil and sourceIndex is -1, indicates the only image in the archive.
+	ref         reference.NamedTagged // May be nil
+	sourceIndex int                   // May be -1
+	// The following data is only available after ensureCachedDataIsPresent() succeeds
+	tarManifest       *ManifestItem // nil if not available yet.
+	configBytes       []byte
+	configDigest      digest.Digest
+	orderedDiffIDList []digest.Digest
+	knownLayers       map[digest.Digest]*layerInfo
+	// Other state
+	generatedManifest []byte    // Private cache for GetManifest(), nil if not set yet.
+	cacheDataLock     sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe
+	cacheDataResult   error     // Private state for ensureCachedDataIsPresent
+}
+
+type layerInfo struct {
+	path string
+	size int64
+}
+
+// NewSource returns a tarfile.Source for an image in the specified archive matching ref
+// and sourceIndex (or the only image if they are (nil, -1)).
+// The archive will be closed if closeArchive is true.
+func NewSource(archive *Reader, closeArchive bool, ref reference.NamedTagged, sourceIndex int) *Source {
+	return &Source{
+		archive:      archive,
+		closeArchive: closeArchive,
+		ref:          ref,
+		sourceIndex:  sourceIndex,
+	}
+}
+
+// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
+// It is safe to call this from multi-threaded code.
+func (s *Source) ensureCachedDataIsPresent() error {
+	s.cacheDataLock.Do(func() {
+		s.cacheDataResult = s.ensureCachedDataIsPresentPrivate()
+	})
+	return s.cacheDataResult
+}
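The cacheDataLock/cacheDataResult pair above is a lazy-initialization idiom: sync.Once guarantees a single attempt, and caching the error makes every later caller observe the same outcome, including a permanent failure. Reduced to a standalone sketch:

```go
package main

import (
	"fmt"
	"sync"
)

type lazy struct {
	once sync.Once
	err  error // result of the single initialization attempt, success or failure
}

// ensure runs init at most once; all callers, on any goroutine, see the same result.
func (l *lazy) ensure(init func() error) error {
	l.once.Do(func() { l.err = init() })
	return l.err
}

func main() {
	var l lazy
	for i := 0; i < 3; i++ {
		fmt.Println(l.ensure(func() error {
			fmt.Println("initializing once") // printed only on the first call
			return nil
		}))
	}
}
```

Note that, as in the Source above, a failed initialization is never retried; that is a deliberate property of the pattern.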
+func (s *Source) ensureCachedDataIsPresentPrivate() error { + tarManifest, _, err := s.archive.ChooseManifestItem(s.ref, s.sourceIndex) + if err != nil { + return err + } + + // Read and parse config. + configBytes, err := s.archive.readTarComponent(tarManifest.Config, iolimits.MaxConfigBodySize) + if err != nil { + return err + } + var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. + if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { + return errors.Wrapf(err, "decoding tar config %s", tarManifest.Config) + } + if parsedConfig.RootFS == nil { + return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config) + } + + knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig) + if err != nil { + return err + } + + // Success; commit. + s.tarManifest = tarManifest + s.configBytes = configBytes + s.configDigest = digest.FromBytes(configBytes) + s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs + s.knownLayers = knownLayers + return nil +} + +// Close removes resources associated with an initialized Source, if any. +func (s *Source) Close() error { + if s.closeArchive { + return s.archive.Close() + } + return nil +} + +// TarManifest returns contents of manifest.json +func (s *Source) TarManifest() []ManifestItem { + return s.archive.Manifest +} + +func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { + // Collect layer data available in manifest and config. + if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { + return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) + } + knownLayers := map[digest.Digest]*layerInfo{} + unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. + for i, diffID := range parsedConfig.RootFS.DiffIDs { + if _, ok := knownLayers[diffID]; ok { + // Apparently it really can happen that a single image contains the same layer diff more than once. + // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter + // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original. + continue + } + layerPath := path.Clean(tarManifest.Layers[i]) + if _, ok := unknownLayerSizes[layerPath]; ok { + return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) + } + li := &layerInfo{ // A new element in each iteration + path: layerPath, + size: -1, + } + knownLayers[diffID] = li + unknownLayerSizes[layerPath] = li + } + + // Scan the tar file to collect layer sizes. + file, err := os.Open(s.archive.path) + if err != nil { + return nil, err + } + defer file.Close() + t := tar.NewReader(file) + for { + h, err := t.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + layerPath := path.Clean(h.Name) + // FIXME: Cache this data across images in Reader. + if li, ok := unknownLayerSizes[layerPath]; ok { + // Since GetBlob will decompress layers that are compressed we need + // to do the decompression here as well, otherwise we will + // incorrectly report the size. Pretty critical, since tools like + // umoci always compress layer blobs. Obviously we only bother with + // the slower method of checking if it's compressed. 
+ uncompressedStream, isCompressed, err := compression.AutoDecompress(t) + if err != nil { + return nil, errors.Wrapf(err, "auto-decompressing %s to determine its size", layerPath) + } + defer uncompressedStream.Close() + + uncompressedSize := h.Size + if isCompressed { + uncompressedSize, err = io.Copy(io.Discard, uncompressedStream) + if err != nil { + return nil, errors.Wrapf(err, "reading %s to find its size", layerPath) + } + } + li.size = uncompressedSize + delete(unknownLayerSizes, layerPath) + } + } + if len(unknownLayerSizes) != 0 { + return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. + } + + return knownLayers, nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be no secondary instances. +func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. + return nil, "", errors.New(`Manifest lists are not supported by "docker-daemon:"`) + } + if s.generatedManifest == nil { + if err := s.ensureCachedDataIsPresent(); err != nil { + return nil, "", err + } + m := manifest.Schema2{ + SchemaVersion: 2, + MediaType: manifest.DockerV2Schema2MediaType, + ConfigDescriptor: manifest.Schema2Descriptor{ + MediaType: manifest.DockerV2Schema2ConfigMediaType, + Size: int64(len(s.configBytes)), + Digest: s.configDigest, + }, + LayersDescriptors: []manifest.Schema2Descriptor{}, + } + for _, diffID := range s.orderedDiffIDList { + li, ok := s.knownLayers[diffID] + if !ok { + return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID) + } + m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ + Digest: diffID, // diffID is a digest of the uncompressed tarball + MediaType: manifest.DockerV2Schema2LayerMediaType, + Size: li.size, + }) + } + manifestBytes, err := json.Marshal(&m) + if err != nil { + return nil, "", err + } + s.generatedManifest = manifestBytes + } + return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil +} + +// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input. +type uncompressedReadCloser struct { + io.Reader + underlyingCloser func() error + uncompressedCloser func() error +} + +func (r uncompressedReadCloser) Close() error { + var res error + if err := r.uncompressedCloser(); err != nil { + res = err + } + if err := r.underlyingCloser(); err != nil && res == nil { + res = err + } + return res +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *Source) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if err := s.ensureCachedDataIsPresent(); err != nil { + return nil, 0, err + } + + if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. + return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil + } + + if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, + underlyingStream, err := s.archive.openTarComponent(li.path) + if err != nil { + return nil, 0, err + } + closeUnderlyingStream := true + defer func() { + if closeUnderlyingStream { + underlyingStream.Close() + } + }() + + // In order to handle the fact that digests != diffIDs (and thus that a + // caller which is trying to verify the blob will run into problems), + // we need to decompress blobs. This is a bit ugly, but it's a + // consequence of making everything addressable by their DiffID rather + // than by their digest... + // + // In particular, because the v2s2 manifest being generated uses + // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of + // layers not their _actual_ digest. The result is that copy/... will + // be verifying a "digest" which is not the actual layer's digest (but + // is instead the DiffID). + + uncompressedStream, _, err := compression.AutoDecompress(underlyingStream) + if err != nil { + return nil, 0, errors.Wrapf(err, "auto-decompressing blob %s", info.Digest) + } + + newStream := uncompressedReadCloser{ + Reader: uncompressedStream, + underlyingCloser: underlyingStream.Close, + uncompressedCloser: uncompressedStream.Close, + } + closeUnderlyingStream = false + + return newStream, li.size, nil + } + + return nil, 0, errors.Errorf("Unknown blob %s", info.Digest) +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as there can be no secondary manifests. +func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. + return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) + } + return [][]byte{}, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be no secondary manifests. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. 
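+// This implementation returns nil: the manifest generated by GetManifest already describes
+// the layers by their uncompressed DiffIDs, so there are no updated values to report.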
+func (s *Source) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go new file mode 100644 index 00000000000..6e6ccd2d808 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go @@ -0,0 +1,28 @@ +package tarfile + +import ( + "github.com/containers/image/v5/manifest" + "github.com/opencontainers/go-digest" +) + +// Various data structures. + +// Based on github.com/docker/docker/image/tarexport/tarexport.go +const ( + manifestFileName = "manifest.json" + legacyLayerFileName = "layer.tar" + legacyConfigFileName = "json" + legacyVersionFileName = "VERSION" + legacyRepositoriesFileName = "repositories" +) + +// ManifestItem is an element of the array stored in the top-level manifest.json file. +type ManifestItem struct { // NOTE: This is visible as docker/tarfile.ManifestItem, and a part of the stable API. + Config string + RepoTags []string + Layers []string + Parent imageID `json:",omitempty"` + LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"` +} + +type imageID string diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go new file mode 100644 index 00000000000..255f0d354ed --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go @@ -0,0 +1,381 @@ +package tarfile + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Writer allows creating a (docker save)-formatted tar archive containing one or more images. +type Writer struct { + mutex sync.Mutex + // ALL of the following members can only be accessed with the mutex held. + // Use Writer.lock() to obtain the mutex. + writer io.Writer + tar *tar.Writer // nil if the Writer has already been closed. + // Other state. + blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs + repositories map[string]map[string]string + legacyLayers map[string]struct{} // A set of IDs of legacy layers that have been already sent. + manifest []ManifestItem + manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above. +} + +// NewWriter returns a Writer for the specified io.Writer. +// The caller must eventually call .Close() on the returned object to create a valid archive. +func NewWriter(dest io.Writer) *Writer { + return &Writer{ + writer: dest, + tar: tar.NewWriter(dest), + blobs: make(map[digest.Digest]types.BlobInfo), + repositories: map[string]map[string]string{}, + legacyLayers: map[string]struct{}{}, + manifestByConfig: map[digest.Digest]int{}, + } +} + +// lock does some sanity checks and locks the Writer. +// If this function succeeds, the caller must call w.unlock. +// Do not use Writer.mutex directly. +func (w *Writer) lock() error { + w.mutex.Lock() + if w.tar == nil { + w.mutex.Unlock() + return errors.New("Internal error: trying to use an already closed tarfile.Writer") + } + return nil +} + +// unlock releases the lock obtained by Writer.lock +// Do not use Writer.mutex directly. 
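+//
+// Typical usage of the lock/unlock pair (a sketch):
+//
+//	if err := w.lock(); err != nil {
+//		return err
+//	}
+//	defer w.unlock()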
+func (w *Writer) unlock() {
+	w.mutex.Unlock()
+}
+
+// tryReusingBlobLocked checks whether the transport already contains a blob, and if so, returns its metadata.
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// The caller must have locked the Writer.
+func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
+	}
+	if blob, ok := w.blobs[info.Digest]; ok {
+		return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
+	}
+	return false, types.BlobInfo{}, nil
+}
+
+// recordBlobLocked records metadata of a blob, which must contain at least a digest and size.
+// The caller must have locked the Writer.
+func (w *Writer) recordBlobLocked(info types.BlobInfo) {
+	w.blobs[info.Digest] = info
+}
+
+// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer.
+// The caller must have locked the Writer.
+func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
+	if _, ok := w.legacyLayers[layerID]; !ok {
+		// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
+		// See also the comment in physicalLayerPath.
+		physicalLayerPath := w.physicalLayerPath(layerDigest)
+		if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
+			return errors.Wrap(err, "creating layer symbolic link")
+		}
+
+		b := []byte("1.0")
+		if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
+			return errors.Wrap(err, "writing VERSION file")
+		}
+
+		if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
+			return errors.Wrap(err, "writing config json file")
+		}
+
+		w.legacyLayers[layerID] = struct{}{}
+	}
+	return nil
+}
+
+// writeLegacyMetadataLocked writes legacy layer metadata and records tags for a single image.
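+// In the legacy (pre-schema2) layout each layer gets its own directory named after the
+// generated layer ID, containing a VERSION file, a "json" config file and a layer.tar
+// symlink to the physical layer; tags are recorded in the top-level "repositories" file
+// written by Close.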
+func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2Descriptor, configBytes []byte, repoTags []reference.NamedTagged) error { + var chainID digest.Digest + lastLayerID := "" + for i, l := range layerDescriptors { + // The legacy format requires a config file per layer + layerConfig := make(map[string]interface{}) + + // The root layer doesn't have any parent + if lastLayerID != "" { + layerConfig["parent"] = lastLayerID + } + // The top layer configuration file is generated by using subpart of the image configuration + if i == len(layerDescriptors)-1 { + var config map[string]*json.RawMessage + err := json.Unmarshal(configBytes, &config) + if err != nil { + return errors.Wrap(err, "unmarshaling config") + } + for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} { + layerConfig[attr] = config[attr] + } + } + + // This chainID value matches the computation in docker/docker/layer.CreateChainID … + if chainID == "" { + chainID = l.Digest + } else { + chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String()) + } + // … but note that the image ID does not _exactly_ match docker/docker/image/v1.CreateID, primarily because + // we create the image configs differently in details. At least recent versions allocate new IDs on load, + // so this is fine as long as the IDs we use are unique / cannot loop. + // + // For intermediate images, we could just use the chainID as an image ID, but using a digest of ~the created + // config makes sure that everything uses the same “namespace”; a bit less efficient but clearer. + // + // Temporarily add the chainID to the config, only for the purpose of generating the image ID. + layerConfig["layer_id"] = chainID + b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point. + if err != nil { + return errors.Wrap(err, "marshaling layer config") + } + delete(layerConfig, "layer_id") + layerID := digest.Canonical.FromBytes(b).Hex() + layerConfig["id"] = layerID + + configBytes, err := json.Marshal(layerConfig) + if err != nil { + return errors.Wrap(err, "marshaling layer config") + } + + if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil { + return err + } + + lastLayerID = layerID + } + + if lastLayerID != "" { + for _, repoTag := range repoTags { + if val, ok := w.repositories[repoTag.Name()]; ok { + val[repoTag.Tag()] = lastLayerID + } else { + w.repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): lastLayerID} + } + } + } + return nil +} + +// checkManifestItemsMatch checks that a and b describe the same image, +// and returns an error if that’s not the case (which should never happen). +func checkManifestItemsMatch(a, b *ManifestItem) error { + if a.Config != b.Config { + return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config) + } + if len(a.Layers) != len(b.Layers) { + return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers) + } + for i := range a.Layers { + if a.Layers[i] != b.Layers[i] { + return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i]) + } + } + // Ignore RepoTags, that will be built later. + // Ignore Parent and LayerSources, which we don’t set to anything meaningful. 
+ return nil +} + +// ensureManifestItemLocked ensures that there is a manifest item pointing to (layerDescriptors, configDigest) with repoTags +// The caller must have locked the Writer. +func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error { + layerPaths := []string{} + for _, l := range layerDescriptors { + layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest)) + } + + var item *ManifestItem + newItem := ManifestItem{ + Config: w.configPath(configDigest), + RepoTags: []string{}, + Layers: layerPaths, + Parent: "", // We don’t have this information + LayerSources: nil, + } + if i, ok := w.manifestByConfig[configDigest]; ok { + item = &w.manifest[i] + if err := checkManifestItemsMatch(item, &newItem); err != nil { + return err + } + } else { + i := len(w.manifest) + w.manifestByConfig[configDigest] = i + w.manifest = append(w.manifest, newItem) + item = &w.manifest[i] + } + + knownRepoTags := map[string]struct{}{} + for _, repoTag := range item.RepoTags { + knownRepoTags[repoTag] = struct{}{} + } + for _, tag := range repoTags { + // For github.com/docker/docker consumers, this works just as well as + // refString := ref.String() + // because when reading the RepoTags strings, github.com/docker/docker/reference + // normalizes both of them to the same value. + // + // Doing it this way to include the normalized-out `docker.io[/library]` does make + // a difference for github.com/projectatomic/docker consumers, with the + // “Add --add-registry and --block-registry options to docker daemon” patch. + // These consumers treat reference strings which include a hostname and reference + // strings without a hostname differently. + // + // Using the host name here is more explicit about the intent, and it has the same + // effect as (docker pull) in projectatomic/docker, which tags the result using + // a hostname-qualified reference. + // See https://github.com/containers/image/issues/72 for a more detailed + // analysis and explanation. + refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag()) + + if _, ok := knownRepoTags[refString]; !ok { + item.RepoTags = append(item.RepoTags, refString) + knownRepoTags[refString] = struct{}{} + } + } + + return nil +} + +// Close writes all outstanding data about images to the archive, and finishes writing data +// to the underlying io.Writer. +// No more images can be added after this is called. +func (w *Writer) Close() error { + if err := w.lock(); err != nil { + return err + } + defer w.unlock() + + b, err := json.Marshal(&w.manifest) + if err != nil { + return err + } + if err := w.sendBytesLocked(manifestFileName, b); err != nil { + return err + } + + b, err = json.Marshal(w.repositories) + if err != nil { + return errors.Wrap(err, "marshaling repositories") + } + if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil { + return errors.Wrap(err, "writing config json file") + } + + if err := w.tar.Close(); err != nil { + return err + } + w.tar = nil // Mark the Writer as closed. + return nil +} + +// configPath returns a path we choose for storing a config with the specified digest. +// NOTE: This is an internal implementation detail, not a format property, and can change +// any time. +func (w *Writer) configPath(configDigest digest.Digest) string { + return configDigest.Hex() + ".json" +} + +// physicalLayerPath returns a path we choose for storing a layer with the specified digest +// (the actual path, i.e. 
a regular file, not a symlink that may be used in the legacy format).
+// NOTE: This is an internal implementation detail, not a format property, and can change
+// any time.
+func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
+	// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
+	// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
+	// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
+	// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
+	// in the root of the tarball.
+	return layerDigest.Hex() + ".tar"
+}
+
+type tarFI struct {
+	path      string
+	size      int64
+	isSymlink bool
+}
+
+func (t *tarFI) Name() string {
+	return t.path
+}
+func (t *tarFI) Size() int64 {
+	return t.size
+}
+func (t *tarFI) Mode() os.FileMode {
+	if t.isSymlink {
+		return os.ModeSymlink
+	}
+	return 0444
+}
+func (t *tarFI) ModTime() time.Time {
+	return time.Unix(0, 0)
+}
+func (t *tarFI) IsDir() bool {
+	return false
+}
+func (t *tarFI) Sys() interface{} {
+	return nil
+}
+
+// sendSymlinkLocked sends a symlink into the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendSymlinkLocked(path string, target string) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Sending as tar link %s -> %s", path, target)
+	return w.tar.WriteHeader(hdr)
+}
+
+// sendBytesLocked sends the bytes b as a file at path into the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendBytesLocked(path string, b []byte) error {
+	return w.sendFileLocked(path, int64(len(b)), bytes.NewReader(b))
+}
+
+// sendFileLocked sends a file into the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Sending as tar file %s", path)
+	if err := w.tar.WriteHeader(hdr); err != nil {
+		return err
+	}
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
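+	// io.Copy reports the number of bytes actually written; we compare it against
+	// expectedSize below so that a truncated or over-long stream surfaces as an error
+	// instead of silently producing a corrupt archive entry.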
+ size, err := io.Copy(w.tar, stream) + if err != nil { + return err + } + if size != expectedSize { + return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size) + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go index 22d84931c85..d0a3f1be069 100644 --- a/vendor/github.com/containers/image/v5/docker/lookaside.go +++ b/vendor/github.com/containers/image/v5/docker/lookaside.go @@ -2,7 +2,6 @@ package docker import ( "fmt" - "io/ioutil" "net/url" "os" "path" @@ -146,7 +145,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { continue } configPath := filepath.Join(dirPath, configName) - configBytes, err := ioutil.ReadFile(configPath) + configBytes, err := os.ReadFile(configPath) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go index 3fed1995cb8..49fa410e91f 100644 --- a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go +++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go @@ -2,7 +2,6 @@ package iolimits import ( "io" - "io/ioutil" "github.com/pkg/errors" ) @@ -47,7 +46,7 @@ const ( func ReadAtMost(reader io.Reader, limit int) ([]byte, error) { limitedReader := io.LimitReader(reader, int64(limit+1)) - res, err := ioutil.ReadAll(limitedReader) + res, err := io.ReadAll(limitedReader) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go index 306220585b6..84bb656ac71 100644 --- a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go +++ b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go @@ -3,7 +3,6 @@ package streamdigest import ( "fmt" "io" - "io/ioutil" "os" "github.com/containers/image/v5/internal/putblobdigest" @@ -16,7 +15,7 @@ import ( // It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file. // If an error occurs, inputInfo is not modified. func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) { - diskBlob, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob") + diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob") if err != nil { return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err) } diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go new file mode 100644 index 00000000000..3d8738db536 --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -0,0 +1,168 @@ +package archive + +import ( + "context" + "io" + "os" + + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/archive" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type ociArchiveImageDestination struct { + ref ociArchiveReference + unpackedDest types.ImageDestination + tempDirRef tempDirOCIRef +} + +// newImageDestination returns an ImageDestination for writing to an existing directory. 
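+// The image is first assembled in a temporary OCI-layout directory; Commit then tars
+// that directory up into the destination archive file.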
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) { + tempDirRef, err := createOCIRef(sys, ref.image) + if err != nil { + return nil, errors.Wrapf(err, "creating oci reference") + } + unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys) + if err != nil { + if err := tempDirRef.deleteTempDir(); err != nil { + return nil, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory) + } + return nil, err + } + return &ociArchiveImageDestination{ref: ref, + unpackedDest: unpackedDest, + tempDirRef: tempDirRef}, nil +} + +// Reference returns the reference used to set up this destination. +func (d *ociArchiveImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any +// Close deletes the temp directory of the oci-archive image +func (d *ociArchiveImageDestination) Close() error { + defer func() { + err := d.tempDirRef.deleteTempDir() + logrus.Debugf("Error deleting temporary directory: %v", err) + }() + return d.unpackedDest.Close() +} + +func (d *ociArchiveImageDestination) SupportedManifestMIMETypes() []string { + return d.unpackedDest.SupportedManifestMIMETypes() +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures +func (d *ociArchiveImageDestination) SupportsSignatures(ctx context.Context) error { + return d.unpackedDest.SupportsSignatures(ctx) +} + +func (d *ociArchiveImageDestination) DesiredLayerCompression() types.LayerCompression { + return d.unpackedDest.DesiredLayerCompression() +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool { + return d.unpackedDest.AcceptsForeignLayerURLs() +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise +func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool { + return d.unpackedDest.MustMatchRuntimeOS() +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool { + return d.unpackedDest.IgnoresEmbeddedDockerReference() +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. 
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
+}
+
+// PutManifest writes the manifest to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+// by `manifest.Digest()`.
+func (d *ociArchiveImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+	return d.unpackedDest.PutManifest(ctx, m, instanceDigest)
+}
+
+// PutSignatures writes a set of signatures to the destination.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+func (d *ociArchiveImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+	return d.unpackedDest.PutSignatures(ctx, signatures, instanceDigest)
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// After the directory is made, it is tarred up into a file and the directory is deleted.
+func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+	if err := d.unpackedDest.Commit(ctx, unparsedToplevel); err != nil {
+		return errors.Wrapf(err, "storing image %q", d.ref.image)
+	}
+
+	// path of the directory to tar up
+	src := d.tempDirRef.tempDirectory
+	// path at which to save the tarred-up file
+	dst := d.ref.resolvedFile
+	return tarDirectory(src, dst)
+}
+
+// tarDirectory tars the directory at src and saves the result to dst.
+func tarDirectory(src, dst string) error {
+	// input is an uncompressed tar stream of the directory at src
+	input, err := archive.Tar(src, archive.Uncompressed)
+	if err != nil {
+		return errors.Wrapf(err, "retrieving stream of bytes from %q", src)
+	}
+
+	// create the destination tar file
+	outFile, err := os.Create(dst)
+	if err != nil {
+		return errors.Wrapf(err, "creating tar file %q", dst)
+	}
+	defer outFile.Close()
+
+	// copy the tar stream into the destination file
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+	_, err = io.Copy(outFile, input)
+
+	return err
+}
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
new file mode 100644
index 00000000000..20b392dc0e1
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go
@@ -0,0 +1,122 @@
+package archive
+
+import (
+	"context"
+	"io"
+
+	ocilayout "github.com/containers/image/v5/oci/layout"
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type ociArchiveImageSource struct {
+	ref         ociArchiveReference
+	unpackedSrc types.ImageSource
+	tempDirRef  tempDirOCIRef
+}
+
+// newImageSource returns an ImageSource for reading from an OCI archive.
+// newImageSource untars the file and saves it in a temp directory +func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) { + tempDirRef, err := createUntarTempDir(sys, ref) + if err != nil { + return nil, errors.Wrap(err, "creating temp directory") + } + + unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys) + if err != nil { + if err := tempDirRef.deleteTempDir(); err != nil { + return nil, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory) + } + return nil, err + } + return &ociArchiveImageSource{ref: ref, + unpackedSrc: unpackedSrc, + tempDirRef: tempDirRef}, nil +} + +// LoadManifestDescriptor loads the manifest +// Deprecated: use LoadManifestDescriptorWithContext instead +func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { + return LoadManifestDescriptorWithContext(nil, imgRef) +} + +// LoadManifestDescriptorWithContext loads the manifest +func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.ImageReference) (imgspecv1.Descriptor, error) { + ociArchRef, ok := imgRef.(ociArchiveReference) + if !ok { + return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference") + } + tempDirRef, err := createUntarTempDir(sys, ociArchRef) + if err != nil { + return imgspecv1.Descriptor{}, errors.Wrap(err, "creating temp directory") + } + defer func() { + err := tempDirRef.deleteTempDir() + logrus.Debugf("Error deleting temporary directory: %v", err) + }() + + descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted) + if err != nil { + return imgspecv1.Descriptor{}, errors.Wrap(err, "loading index") + } + return descriptor, nil +} + +// Reference returns the reference used to set up this source. +func (s *ociArchiveImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +// Close deletes the temporary directory at dst +func (s *ociArchiveImageSource) Close() error { + defer func() { + err := s.tempDirRef.deleteTempDir() + logrus.Debugf("error deleting tmp dir: %v", err) + }() + return s.unpackedSrc.Close() +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + return s.unpackedSrc.GetManifest(ctx, instanceDigest) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
+func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + return s.unpackedSrc.GetBlob(ctx, info, cache) +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return s.unpackedSrc.GetSignatures(ctx, instanceDigest) +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *ociArchiveImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + return s.unpackedSrc.LayerInfosForCopy(ctx, instanceDigest) +} diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go new file mode 100644 index 00000000000..4fa9127659b --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go @@ -0,0 +1,197 @@ +package archive + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/oci/internal" + ocilayout "github.com/containers/image/v5/oci/layout" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/archive" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for OCI archive +// it creates an oci-archive tar file by calling into the OCI transport +// tarring the directory created by oci and deleting the directory +var Transport = ociArchiveTransport{} + +type ociArchiveTransport struct{} + +// ociArchiveReference is an ImageReference for OCI Archive paths +type ociArchiveReference struct { + file string + resolvedFile string + image string +} + +func (t ociArchiveTransport) Name() string { + return "oci-archive" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix +// into an ImageReference. 
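+// A reference string has the form path[:image], e.g. "/tmp/images.tar:latest"
+// (used as "oci-archive:/tmp/images.tar:latest" once the transport prefix is included).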
+func (t ociArchiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key.
+func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+	return internal.ValidateScope(scope)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+	file, image := internal.SplitPathAndImage(reference)
+	return NewReference(file, image)
+}
+
+// NewReference returns an OCI reference for a file and an image.
+func NewReference(file, image string) (types.ImageReference, error) {
+	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := internal.ValidateOCIPath(file); err != nil {
+		return nil, err
+	}
+
+	if err := internal.ValidateImageName(image); err != nil {
+		return nil, err
+	}
+
+	return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil
+}
+
+func (ref ociArchiveReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+func (ref ociArchiveReference) StringWithinTransport() string {
+	return fmt.Sprintf("%s:%s", ref.file, ref.image)
+}
+
+// DockerReference returns a Docker reference associated with this reference.
+func (ref ociArchiveReference) DockerReference() reference.Named {
+	return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+func (ref ociArchiveReference) PolicyConfigurationIdentity() string {
+	// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+	// same image and the two can’t be statically disambiguated. Using at least the repository directory is
+	// less granular but hopefully still useful.
+	return ref.resolvedFile
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set.
+func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string {
+	res := []string{}
+	path := ref.resolvedFile
+	for {
+		lastSlash := strings.LastIndex(path, "/")
+		// Note that we do not include "/"; it is redundant with the default "" global default,
+		// and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+		if lastSlash == -1 || path == "/" {
+			break
+		}
+		res = append(res, path)
+		path = path[:lastSlash]
+	}
+	return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	src, err := newImageSource(ctx, sys, ref)
+	if err != nil {
+		return nil, err
+	}
+	return image.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ociArchiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ctx, sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(ctx, sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for oci-archive: images")
+}
+
+// tempDirOCIRef stores the ociReference and temporary directory returned by createOCIRef.
+type tempDirOCIRef struct {
+	tempDirectory   string
+	ociRefExtracted types.ImageReference
+}
+
+// deleteTempDir deletes the temporary directory created by createOCIRef.
+func (t *tempDirOCIRef) deleteTempDir() error {
+	return os.RemoveAll(t.tempDirectory)
+}
+
+// createOCIRef creates the oci reference of the image.
+// If SystemContext.BigFilesTemporaryDir is not "", it overrides the temporary directory to use for storing big files.
+func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
+	dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
+	if err != nil {
+		return tempDirOCIRef{}, errors.Wrapf(err, "creating temp directory")
+	}
+	ociRef, err := ocilayout.NewReference(dir, image)
+	if err != nil {
+		return tempDirOCIRef{}, err
+	}
+
+	tempDirRef := tempDirOCIRef{tempDirectory: dir, ociRefExtracted: ociRef}
+	return tempDirRef, nil
+}
+
+// createUntarTempDir creates a temporary directory and untars the archive's content into it.
+func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) {
+	tempDirRef, err := createOCIRef(sys, ref.image)
+	if err != nil {
+		return tempDirOCIRef{}, errors.Wrap(err, "creating oci reference")
+	}
+	src := ref.resolvedFile
+	dst := tempDirRef.tempDirectory
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
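+	// Open the archive and untar it into the temporary directory; NoLchown skips
+	// ownership changes during extraction, so this also works for unprivileged users.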
+	arch, err := os.Open(src)
+	if err != nil {
+		return tempDirOCIRef{}, err
+	}
+	defer arch.Close()
+	if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
+		if err := tempDirRef.deleteTempDir(); err != nil {
+			return tempDirOCIRef{}, errors.Wrapf(err, "deleting temp directory %q", tempDirRef.tempDirectory)
+		}
+		return tempDirOCIRef{}, errors.Wrapf(err, "untarring file %q", src)
+	}
+	return tempDirRef, nil
+}
diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
new file mode 100644
index 00000000000..c2012e50e02
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go
@@ -0,0 +1,126 @@
+package internal
+
+import (
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// annotation spec from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+	separator = `(?:[-._:@+]|--)`
+	alphanum  = `(?:[A-Za-z0-9]+)`
+	component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
+var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`)
+
+// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs.
+// In any other case an error is returned.
+func ValidateImageName(image string) error {
+	if len(image) == 0 {
+		return nil
+	}
+
+	var err error
+	if !refRegexp.MatchString(image) {
+		err = errors.Errorf("Invalid image %s", image)
+	}
+	return err
+}
+
+// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image.
+// Neither path nor image parts are validated at this stage.
+func SplitPathAndImage(reference string) (string, string) {
+	if runtime.GOOS == "windows" {
+		return splitPathAndImageWindows(reference)
+	}
+	return splitPathAndImageNonWindows(reference)
+}
+
+func splitPathAndImageWindows(reference string) (string, string) {
+	groups := windowsRefRegexp.FindStringSubmatch(reference)
+	// a nil groups slice means no match
+	if groups == nil {
+		return reference, ""
+	}
+
+	// we expect three elements: the full match, the capture group for the path and
+	// the capture group for the image
+	if len(groups) != 3 {
+		return reference, ""
+	}
+	return groups[1], groups[2]
+}
+
+func splitPathAndImageNonWindows(reference string) (string, string) {
+	sep := strings.SplitN(reference, ":", 2)
+	path := sep[0]
+
+	var image string
+	if len(sep) == 2 {
+		image = sep[1]
+	}
+	return path, image
+}
+
+// ValidateOCIPath takes the OCI path and validates it.
+func ValidateOCIPath(path string) error {
+	if runtime.GOOS == "windows" {
+		// On Windows we must allow for a ':' as part of the path
+		if strings.Count(path, ":") > 1 {
+			return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
+		}
+	} else {
+		if strings.Contains(path, ":") {
+			return errors.Errorf("Invalid OCI reference: path %s contains a colon", path)
+		}
+	}
+	return nil
+}
+
+// ValidateScope validates a policy configuration scope for an OCI transport.
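+// For example, "/var/lib/images" is a valid scope on non-Windows systems, while a
+// relative path, "/" itself, or a non-canonical path such as "/a/../b" is rejected;
+// on Windows the scope must start with a drive letter, e.g. `C:\`.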
+func ValidateScope(scope string) error { + var err error + if runtime.GOOS == "windows" { + err = validateScopeWindows(scope) + } else { + err = validateScopeNonWindows(scope) + } + if err != nil { + return err + } + + cleaned := filepath.Clean(scope) + if cleaned != scope { + return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + } + + return nil +} + +func validateScopeWindows(scope string) error { + matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) + if !matched { + return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope) + } + + return nil +} + +func validateScopeNonWindows(scope string) error { + if !strings.HasPrefix(scope, "/") { + return errors.Errorf("Invalid scope %s: must be an absolute path", scope) + } + + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + + return nil +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go new file mode 100644 index 00000000000..77e8fd87637 --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -0,0 +1,346 @@ +package layout + +import ( + "context" + "encoding/json" + "io" + "os" + "path/filepath" + "runtime" + + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type ociImageDestination struct { + ref ociReference + index imgspecv1.Index + sharedBlobDir string + acceptUncompressedLayers bool +} + +// newImageDestination returns an ImageDestination for writing to an existing directory. +func newImageDestination(sys *types.SystemContext, ref ociReference) (types.ImageDestination, error) { + var index *imgspecv1.Index + if indexExists(ref) { + var err error + index, err = ref.getIndex() + if err != nil { + return nil, err + } + } else { + index = &imgspecv1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + Annotations: make(map[string]string), + } + } + + d := &ociImageDestination{ref: ref, index: *index} + if sys != nil { + d.sharedBlobDir = sys.OCISharedBlobDirPath + d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers + } + + if err := ensureDirectoryExists(d.ref.dir); err != nil { + return nil, err + } + // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, + // but it MAY be empty (e.g. if we never end up calling PutBlob) + // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { + return nil, err + } + return d, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *ociImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. 
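+// For the layout destination this is a no-op: everything is written directly into
+// ref.dir, so there is nothing to release here.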
+func (d *ociImageDestination) Close() error { + return nil +} + +func (d *ociImageDestination) SupportedManifestMIMETypes() []string { + return []string{ + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + } +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error { + return errors.Errorf("Pushing signatures for OCI images is not supported") +} + +func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression { + if d.acceptUncompressedLayers { + return types.PreserveOriginal + } + return types.Compress +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ociImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *ociImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ociImageDestination) HasThreadSafePutBlob() bool { + return true +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob") + if err != nil { + return types.BlobInfo{}, err + } + succeeded := false + explicitClosed := false + defer func() { + if !explicitClosed { + blobFile.Close() + } + if !succeeded { + os.Remove(blobFile.Name()) + } + }() + + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). 
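+	// The digester wraps the stream, so the blob digest is computed on the fly while
+	// the data is copied into the temporary file.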
+	size, err := io.Copy(blobFile, stream)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	blobDigest := digester.Digest()
+	if inputInfo.Size != -1 && size != inputInfo.Size {
+		return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+	}
+	if err := blobFile.Sync(); err != nil {
+		return types.BlobInfo{}, err
+	}
+
+	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
+	// On Windows, the “permissions of newly created files” argument to syscall.Open is
+	// ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod,
+	// always fails on Windows.
+	if runtime.GOOS != "windows" {
+		if err := blobFile.Chmod(0644); err != nil {
+			return types.BlobInfo{}, err
+		}
+	}
+
+	blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	if err := ensureParentDirectoryExists(blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+
+	// we need to explicitly close the file, since otherwise the rename won't work on Windows
+	blobFile.Close()
+	explicitClosed = true
+	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+		return types.BlobInfo{}, err
+	}
+	succeeded = true
+	return types.BlobInfo{Digest: blobDigest, Size: size}, nil
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if info.Digest == "" {
+		return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
+	}
+	blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, types.BlobInfo{}, nil
+	}
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+
+	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
+}
+
+// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types,
+// this should be either an OCI manifest (possibly converted to this format by the caller) or index,
+// neither of which we'll need to modify further.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to overwrite the manifest for (when
+// the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated +// by `manifest.Digest()`. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + var digest digest.Digest + var err error + if instanceDigest != nil { + digest = *instanceDigest + } else { + digest, err = manifest.Digest(m) + if err != nil { + return err + } + } + + blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir) + if err != nil { + return err + } + if err := ensureParentDirectoryExists(blobPath); err != nil { + return err + } + if err := os.WriteFile(blobPath, m, 0644); err != nil { + return err + } + + if instanceDigest != nil { + return nil + } + + // If we had platform information, we'd build an imgspecv1.Platform structure here. + + // Start filling out the descriptor for this entry + desc := imgspecv1.Descriptor{} + desc.Digest = digest + desc.Size = int64(len(m)) + if d.ref.image != "" { + desc.Annotations = make(map[string]string) + desc.Annotations[imgspecv1.AnnotationRefName] = d.ref.image + } + + // If we knew the MIME type, we wouldn't have to guess here. + desc.MediaType = manifest.GuessMIMEType(m) + + d.addManifest(&desc) + + return nil +} + +func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { + // If the new entry has a name, remove any conflicting names which we already have. + if desc.Annotations != nil && desc.Annotations[imgspecv1.AnnotationRefName] != "" { + // The name is being set on a new entry, so remove any older ones that had the same name. + // We might be storing an index and all of its component images, and we'll want to attach + // the name to the last one, which is the index. + for i, manifest := range d.index.Manifests { + if manifest.Annotations[imgspecv1.AnnotationRefName] == desc.Annotations[imgspecv1.AnnotationRefName] { + delete(d.index.Manifests[i].Annotations, imgspecv1.AnnotationRefName) + break + } + } + } + // If it has the same digest as another entry in the index, we already overwrote the file, + // so just pick up the other information. + for i, manifest := range d.index.Manifests { + if manifest.Digest == desc.Digest && manifest.Annotations[imgspecv1.AnnotationRefName] == "" { + // Replace it completely. + d.index.Manifests[i] = *desc + return + } + } + // It's a new entry to be added to the index. + d.index.Manifests = append(d.index.Manifests, *desc) +} + +// PutSignatures would add the given signatures to the oci layout (currently not supported). +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for +// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. +func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + if len(signatures) != 0 { + return errors.Errorf("Pushing signatures for OCI images is not supported") + } + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. 
+// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+// original manifest list digest, if desired.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
+ if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
+ return err
+ }
+ indexJSON, err := json.Marshal(d.index)
+ if err != nil {
+ return err
+ }
+ return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
+}
+
+func ensureDirectoryExists(path string) error {
+ if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ensureParentDirectoryExists ensures the parent of the supplied path exists.
+func ensureParentDirectoryExists(path string) error {
+ return ensureDirectoryExists(filepath.Dir(path))
+}
+
+// indexExists checks whether the index location specified in the OCI reference exists.
+// The implementation is opinionated: in case of unexpected errors true is returned, i.e.
+// errors other than "file does not exist" are treated as the index existing.
+func indexExists(ref ociReference) bool {
+ _, err := os.Stat(ref.indexPath())
+ if err == nil {
+ return true
+ }
+ if os.IsNotExist(err) {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
new file mode 100644
index 00000000000..8973f461c9a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go
@@ -0,0 +1,208 @@
+package layout
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+
+ "github.com/containers/image/v5/manifest"
+ "github.com/containers/image/v5/pkg/tlsclientconfig"
+ "github.com/containers/image/v5/types"
+ "github.com/docker/go-connections/tlsconfig"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+type ociImageSource struct {
+ ref ociReference
+ index *imgspecv1.Index
+ descriptor imgspecv1.Descriptor
+ client *http.Client
+ sharedBlobDir string
+}
+
+// newImageSource returns an ImageSource for reading from an existing directory.
+func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSource, error) {
+ tr := tlsclientconfig.NewTransport()
+ tr.TLSClientConfig = tlsconfig.ServerDefault()
+
+ if sys != nil && sys.OCICertPath != "" {
+ if err := tlsclientconfig.SetupCertificates(sys.OCICertPath, tr.TLSClientConfig); err != nil {
+ return nil, err
+ }
+ tr.TLSClientConfig.InsecureSkipVerify = sys.OCIInsecureSkipTLSVerify
+ }
+
+ client := &http.Client{}
+ client.Transport = tr
+ descriptor, err := ref.getManifestDescriptor()
+ if err != nil {
+ return nil, err
+ }
+ index, err := ref.getIndex()
+ if err != nil {
+ return nil, err
+ }
+ d := &ociImageSource{ref: ref, index: index, descriptor: descriptor, client: client}
+ if sys != nil {
+ // TODO(jonboulle): check dir existence?
+ d.sharedBlobDir = sys.OCISharedBlobDirPath
+ }
+ return d, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ociImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *ociImageSource) Close() error {
+ return nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ var dig digest.Digest
+ var mimeType string
+ var err error
+
+ if instanceDigest == nil {
+ dig = digest.Digest(s.descriptor.Digest)
+ mimeType = s.descriptor.MediaType
+ } else {
+ dig = *instanceDigest
+ for _, md := range s.index.Manifests {
+ if md.Digest == dig {
+ mimeType = md.MediaType
+ break
+ }
+ }
+ }
+
+ manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir)
+ if err != nil {
+ return nil, "", err
+ }
+
+ m, err := os.ReadFile(manifestPath)
+ if err != nil {
+ return nil, "", err
+ }
+ if mimeType == "" {
+ mimeType = manifest.GuessMIMEType(m)
+ }
+
+ return m, mimeType, nil
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *ociImageSource) HasThreadSafeGetBlob() bool {
+ return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ if len(info.URLs) != 0 {
+ r, size, err := s.getExternalBlob(ctx, info.URLs)
+ if err != nil {
+ return nil, 0, err
+ } else if r != nil {
+ return r, size, nil
+ }
+ }
+
+ path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ r, err := os.Open(path)
+ if err != nil {
+ return nil, 0, err
+ }
+ fi, err := r.Stat()
+ if err != nil {
+ return nil, 0, err
+ }
+ return r, fi.Size(), nil
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ return [][]byte{}, nil
+}
+
+// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty.
+// This function can return a nil reader when no URL is supported by it. In this case, the caller
+// should fall back to fetching the non-external blob (i.e. pull from the registry).
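+// (Illustrative example with hypothetical URLs: given
+// urls = ["ftp://host/blob", "https://mirror.example/blob"], the ftp entry is skipped and
+// the https one is fetched; given only ["ftp://host/blob"], (nil, 0, nil) is returned and
+// the caller falls back to the registry blob.)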
+func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { + if len(urls) == 0 { + return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") + } + + errWrap := errors.New("failed fetching external blob from all urls") + hasSupportedURL := false + for _, u := range urls { + if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") { + continue // unsupported url. skip this url. + } + hasSupportedURL = true + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", u, err.Error()) + continue + } + + resp, err := s.client.Do(req) + if err != nil { + errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", u, err.Error()) + continue + } + + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", u) + continue + } + + return resp.Body, getBlobSize(resp), nil + } + if !hasSupportedURL { + return nil, 0, nil // fallback to non-external blob + } + + return nil, 0, errWrap +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *ociImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go new file mode 100644 index 00000000000..a99b631584d --- /dev/null +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -0,0 +1,264 @@ +package layout + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/oci/internal" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +var ( + // Transport is an ImageTransport for OCI directories. + Transport = ociTransport{} + + // ErrMoreThanOneImage is an error returned when the manifest includes + // more than one image and the user should choose which one to use. 
+ ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image")
+)
+
+type ociTransport struct{}
+
+func (t ociTransport) Name() string {
+ return "oci"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid key for signature.PolicyTransportScopes
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
+ return internal.ValidateScope(scope)
+}
+
+// ociReference is an ImageReference for OCI directory paths.
+type ociReference struct {
+ // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
+ // Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on.
+
+ // Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid
+ // being exposed to symlinks and renames in the parent directories to the working directory).
+ // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
+ dir string // As specified by the user. May be relative, contain symlinks, etc.
+ resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
+ // If image=="", it means the "only image" in the index.json is used in the case it is a source;
+ // for destinations, the image name annotation "image.ref.name" is not added to the index.json.
+ image string
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+ dir, image := internal.SplitPathAndImage(reference)
+ return NewReference(dir, image)
+}
+
+// NewReference returns an OCI reference for a directory and an image.
+//
+// We do not expose an API supplying the resolvedDir; we could, but recomputing it
+// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
+func NewReference(dir, image string) (types.ImageReference, error) {
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := internal.ValidateOCIPath(dir); err != nil {
+ return nil, err
+ }
+
+ if err = internal.ValidateImageName(image); err != nil {
+ return nil, err
+ }
+
+ return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
+}
+
+func (ref ociReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref ociReference) StringWithinTransport() string {
+ return fmt.Sprintf("%s:%s", ref.dir, ref.image)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref ociReference) DockerReference() reference.Named {
+ return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref ociReference) PolicyConfigurationIdentity() string {
+ // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+ // same image and the two can’t be statically disambiguated. Using at least the repository directory is
+ // less granular but hopefully still useful.
+ return ref.resolvedDir
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref ociReference) PolicyConfigurationNamespaces() []string {
+ res := []string{}
+ path := ref.resolvedDir
+ for {
+ lastSlash := strings.LastIndex(path, "/")
+ // Note that we do not include "/"; it is redundant with the default "" global default,
+ // and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+ if lastSlash == -1 || path == "/" {
+ break
+ }
+ res = append(res, path)
+ path = path[:lastSlash]
+ }
+ return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ src, err := newImageSource(sys, ref)
+ if err != nil {
+ return nil, err
+ }
+ return image.FromSource(ctx, sys, src)
+}
+
+// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs
+// opening the index, nil is returned together with the error.
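+// (Illustrative, abbreviated index.json shape, with hypothetical values:
+//   {"schemaVersion": 2, "manifests": [{"mediaType": "application/vnd.oci.image.manifest.v1+json",
+//     "digest": "sha256:…", "size": 7143, "annotations": {"org.opencontainers.image.ref.name": "latest"}}]})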
+func (ref ociReference) getIndex() (*imgspecv1.Index, error) { + indexJSON, err := os.Open(ref.indexPath()) + if err != nil { + return nil, err + } + defer indexJSON.Close() + + index := &imgspecv1.Index{} + if err := json.NewDecoder(indexJSON).Decode(index); err != nil { + return nil, err + } + return index, nil +} + +func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { + index, err := ref.getIndex() + if err != nil { + return imgspecv1.Descriptor{}, err + } + + var d *imgspecv1.Descriptor + if ref.image == "" { + // return manifest if only one image is in the oci directory + if len(index.Manifests) == 1 { + d = &index.Manifests[0] + } else { + // ask user to choose image when more than one image in the oci directory + return imgspecv1.Descriptor{}, ErrMoreThanOneImage + } + } else { + // if image specified, look through all manifests for a match + for _, md := range index.Manifests { + if md.MediaType != imgspecv1.MediaTypeImageManifest && md.MediaType != imgspecv1.MediaTypeImageIndex { + continue + } + refName, ok := md.Annotations[imgspecv1.AnnotationRefName] + if !ok { + continue + } + if refName == ref.image { + d = &md + break + } + } + } + if d == nil { + return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image) + } + return *d, nil +} + +// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name +// when pulling an image +func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { + ociRef, ok := imgRef.(ociReference) + if !ok { + return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef") + } + return ociRef.getManifestDescriptor() +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref ociReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.Errorf("Deleting images not implemented for oci: images") +} + +// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. +func (ref ociReference) ociLayoutPath() string { + return filepath.Join(ref.dir, "oci-layout") +} + +// indexPath returns a path for the index.json within a directory using OCI conventions. +func (ref ociReference) indexPath() string { + return filepath.Join(ref.dir, "index.json") +} + +// blobPath returns a path for a blob within a directory using OCI image-layout conventions. 
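+// For example (illustrative, hypothetical digest): with sharedBlobDir == "", digest
+// sha256:dead… maps to <dir>/blobs/sha256/dead…; a non-empty sharedBlobDir replaces the
+// <dir>/blobs prefix.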
+func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) { + if err := digest.Validate(); err != nil { + return "", errors.Wrapf(err, "unexpected digest reference %s", digest) + } + blobDir := filepath.Join(ref.dir, "blobs") + if sharedBlobDir != "" { + blobDir = sharedBlobDir + } + return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go new file mode 100644 index 00000000000..a6473ae68f4 --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -0,0 +1,1193 @@ +package openshift + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "reflect" + "strings" + "time" + + "github.com/containers/storage/pkg/homedir" + "github.com/ghodss/yaml" + "github.com/imdario/mergo" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/net/http2" +) + +// restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig. +// restTLSClientConfig contains settings to enable transport layer security +type restTLSClientConfig struct { + // Server requires TLS client certificate authentication + CertFile string + // Server requires TLS client certificate authentication + KeyFile string + // Trusted root certificates for server + CAFile string + + // CertData holds PEM-encoded bytes (typically read from a client certificate file). + // CertData takes precedence over CertFile + CertData []byte + // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). + // KeyData takes precedence over KeyFile + KeyData []byte + // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). + // CAData takes precedence over CAFile + CAData []byte +} + +// restConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.Config. +// Config holds the common attributes that can be passed to a Kubernetes client on +// initialization. +type restConfig struct { + // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. + // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. + Host string + + // Server requires Basic authentication + Username string + Password string + + // Server requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + // TODO: demonstrate an OAuth2 compatible client. + BearerToken string + + // TLSClientConfig contains settings to enable transport layer security + TLSClientConfig restTLSClientConfig + + // Server should be accessed without verifying the TLS + // certificate. For testing only. + Insecure bool +} + +// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig. +// ClientConfig is used to make it easy to get an api server client +type clientConfig interface { + // ClientConfig returns a complete client config + ClientConfig() (*restConfig, error) +} + +// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig. 
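+// (Illustrative behavior under the rules below: with KUBECONFIG=/tmp/a:/tmp/b, both files
+// are merged with /tmp/a taking precedence; with KUBECONFIG unset, only ~/.kube/config is
+// consulted.)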
+func defaultClientConfig() clientConfig {
+ loadingRules := newOpenShiftClientConfigLoadingRules()
+ // REMOVED: Allowing command-line overriding of loadingRules
+ // REMOVED: clientcmd.ConfigOverrides
+
+ clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules)
+
+ return clientConfig
+}
+
+var recommendedHomeFile = path.Join(homedir.Get(), ".kube/config")
+
+// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules.
+// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift.
+// 1. --config value
+// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file
+func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules {
+ chain := []string{}
+
+ envVarFile := os.Getenv("KUBECONFIG")
+ if len(envVarFile) != 0 {
+ chain = append(chain, filepath.SplitList(envVarFile)...)
+ } else {
+ chain = append(chain, recommendedHomeFile)
+ }
+
+ return &clientConfigLoadingRules{
+ Precedence: chain,
+ // REMOVED: Migration support; run (oc login) to trigger migration
+ }
+}
+
+// deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.
+// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules.
+// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that
+// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before
+// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid
+// passing extraneous information down a call stack.
+type deferredLoadingClientConfig struct {
+ loadingRules *clientConfigLoadingRules
+
+ clientConfig clientConfig
+}
+
+// newNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig.
+// NewNonInteractiveDeferredLoadingClientConfig creates a deferred-loading clientConfig using the passed loading rules.
+func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig {
+ return &deferredLoadingClientConfig{loadingRules: loadingRules}
+}
+
+func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) {
+ if config.clientConfig == nil {
+ // REMOVED: Support for concurrent use in multiple threads.
+ mergedConfig, err := config.loadingRules.Load()
+ if err != nil {
+ return nil, err
+ }
+
+ // REMOVED: Interactive fallback support.
+ mergedClientConfig := newNonInteractiveClientConfig(*mergedConfig)
+
+ config.clientConfig = mergedClientConfig
+ }
+
+ return config.clientConfig, nil
+}
+
+// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig.
+// ClientConfig implements ClientConfig
+func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) {
+ mergedClientConfig, err := config.createClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ mergedConfig, err := mergedClientConfig.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ // REMOVED: In-cluster service account configuration use.
+ + return mergedConfig, nil +} + +var ( + // DefaultCluster is the cluster config used when no other config is specified + // TODO: eventually apiserver should start on 443 and be secure by default + defaultCluster = clientcmdCluster{Server: "http://localhost:8080"} + + // EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name + envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")} +) + +// directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig. +// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information +type directClientConfig struct { + config clientcmdConfig +} + +// newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig. +// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information +func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig { + return &directClientConfig{config} +} + +// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig. +// ClientConfig implements ClientConfig +func (config *directClientConfig) ClientConfig() (*restConfig, error) { + if err := config.ConfirmUsable(); err != nil { + return nil, err + } + + configAuthInfo := config.getAuthInfo() + configClusterInfo := config.getCluster() + + clientConfig := &restConfig{} + clientConfig.Host = configClusterInfo.Server + if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 { + u.RawQuery = "" + u.Fragment = "" + clientConfig.Host = u.String() + } + + // only try to read the auth information if we are secure + if isConfigTransportTLS(*clientConfig) { + var err error + // REMOVED: Support for interactive fallback. + userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo) + if err != nil { + return nil, err + } + if err = mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig); err != nil { + return nil, err + } + + serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo) + if err != nil { + return nil, err + } + if err = mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig); err != nil { + return nil, err + } + } + + return clientConfig, nil +} + +// getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig. +// clientauth.Info object contain both user identification and server identification. We want different precedence orders for +// both, so we have to split the objects and merge them separately +// we want this order of precedence for the server identification +// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files) +// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) +// 3. 
load the ~/.kubernetes_auth file as a default
+func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
+ mergedConfig := &restConfig{}
+
+ // configClusterInfo holds the information identifying the server, as provided by .kubeconfig
+ configClientConfig := &restConfig{}
+ configClientConfig.TLSClientConfig.CAFile = configClusterInfo.CertificateAuthority
+ configClientConfig.TLSClientConfig.CAData = configClusterInfo.CertificateAuthorityData
+ configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+ if err := mergo.MergeWithOverwrite(mergedConfig, configClientConfig); err != nil {
+ return nil, err
+ }
+
+ return mergedConfig, nil
+}
+
+// getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
+// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for user identification:
+// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
+// 4. if there is not enough information to identify the user, prompt if possible
+func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
+ mergedConfig := &restConfig{}
+
+ // blindly overwrite existing values based on precedence
+ if len(configAuthInfo.Token) > 0 {
+ mergedConfig.BearerToken = configAuthInfo.Token
+ }
+ if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+ mergedConfig.TLSClientConfig.CertFile = configAuthInfo.ClientCertificate
+ mergedConfig.TLSClientConfig.CertData = configAuthInfo.ClientCertificateData
+ mergedConfig.TLSClientConfig.KeyFile = configAuthInfo.ClientKey
+ mergedConfig.TLSClientConfig.KeyData = configAuthInfo.ClientKeyData
+ }
+ if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+ mergedConfig.Username = configAuthInfo.Username
+ mergedConfig.Password = configAuthInfo.Password
+ }
+
+ // REMOVED: prompting for missing information.
+ return mergedConfig, nil
+}
+
+// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
+// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
+func (config *directClientConfig) ConfirmUsable() error {
+ var validationErrors []error
+ validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
+ validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...)
+ // when direct client config is specified, and our only error is that no server is defined, we should + // return a standard "no config" error + if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster { + return newErrConfigurationInvalid([]error{errEmptyConfig}) + } + return newErrConfigurationInvalid(validationErrors) +} + +// getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName. +func (config *directClientConfig) getContextName() string { + // REMOVED: overrides support + return config.config.CurrentContext +} + +// getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName. +func (config *directClientConfig) getAuthInfoName() string { + // REMOVED: overrides support + return config.getContext().AuthInfo +} + +// getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName. +func (config *directClientConfig) getClusterName() string { + // REMOVED: overrides support + return config.getContext().Cluster +} + +// getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext. +func (config *directClientConfig) getContext() clientcmdContext { + contexts := config.config.Contexts + contextName := config.getContextName() + + var mergedContext clientcmdContext + if configContext, exists := contexts[contextName]; exists { + if err := mergo.MergeWithOverwrite(&mergedContext, configContext); err != nil { + logrus.Debugf("Can't merge configContext: %v", err) + } + } + // REMOVED: overrides support + + return mergedContext +} + +var ( + errEmptyConfig = errors.New("no configuration has been provided") + // message is for consistency with old behavior + errEmptyCluster = errors.New("cluster has no server defined") +) + +//helper for checking certificate/key/CA +func validateFileIsReadable(name string) error { + answer, err := os.Open(name) + defer func() { + if err := answer.Close(); err != nil { + logrus.Debugf("Error closing %v: %v", name, err) + } + }() + return err +} + +// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo. +// validateClusterInfo looks for conflicts and errors in the cluster info +func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error { + var validationErrors []error + + if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) { + return []error{errEmptyCluster} + } + + if len(clusterInfo.Server) == 0 { + if len(clusterName) == 0 { + validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined")) + } else { + validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName)) + } + } + // Make sure CA data and CA file aren't both specified + if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { + validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. 
certificate-authority-data will override", clusterName)) + } + if len(clusterInfo.CertificateAuthority) != 0 { + err := validateFileIsReadable(clusterInfo.CertificateAuthority) + if err != nil { + validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + } + } + + return validationErrors +} + +// validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo. +// validateAuthInfo looks for conflicts and errors in the auth info +func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error { + var validationErrors []error + + usingAuthPath := false + methods := make([]string, 0, 3) + if len(authInfo.Token) != 0 { + methods = append(methods, "token") + } + if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { + methods = append(methods, "basicAuth") + } + + if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { + // Make sure cert data and file aren't both specified + if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { + validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName)) + } + // Make sure key data and file aren't both specified + if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { + validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) + } + // Make sure a key is specified + if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { + validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName)) + } + + if len(authInfo.ClientCertificate) != 0 { + err := validateFileIsReadable(authInfo.ClientCertificate) + if err != nil { + validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + } + } + if len(authInfo.ClientKey) != 0 { + err := validateFileIsReadable(authInfo.ClientKey) + if err != nil { + validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + } + } + } + + // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case + if (len(methods) > 1) && (!usingAuthPath) { + validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) + } + + return validationErrors +} + +// getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo. 
+func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo { + authInfos := config.config.AuthInfos + authInfoName := config.getAuthInfoName() + + var mergedAuthInfo clientcmdAuthInfo + if configAuthInfo, exists := authInfos[authInfoName]; exists { + if err := mergo.MergeWithOverwrite(&mergedAuthInfo, configAuthInfo); err != nil { + logrus.Debugf("Can't merge configAuthInfo: %v", err) + } + } + // REMOVED: overrides support + + return mergedAuthInfo +} + +// getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster. +func (config *directClientConfig) getCluster() clientcmdCluster { + clusterInfos := config.config.Clusters + clusterInfoName := config.getClusterName() + + var mergedClusterInfo clientcmdCluster + if err := mergo.MergeWithOverwrite(&mergedClusterInfo, defaultCluster); err != nil { + logrus.Debugf("Can't merge defaultCluster: %v", err) + } + if err := mergo.MergeWithOverwrite(&mergedClusterInfo, envVarCluster); err != nil { + logrus.Debugf("Can't merge envVarCluster: %v", err) + } + if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { + if err := mergo.MergeWithOverwrite(&mergedClusterInfo, configClusterInfo); err != nil { + logrus.Debugf("Can't merge configClusterInfo: %v", err) + } + } + // REMOVED: overrides support + + return mergedClusterInfo +} + +// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate. +// This helper implements the error and Errors interfaces. Keeping it private +// prevents people from making an aggregate of 0 errors, which is not +// an error, but does satisfy the error interface. +type aggregateErr []error + +// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate. +// NewAggregate converts a slice of errors into an Aggregate interface, which +// is itself an implementation of the error interface. If the slice is empty, +// this returns nil. +// It will check if any of the element of input error list is nil, to avoid +// nil pointer panic when call Error(). +func newAggregate(errlist []error) error { + if len(errlist) == 0 { + return nil + } + // In case of input error list contains nil + var errs []error + for _, e := range errlist { + if e != nil { + errs = append(errs, e) + } + } + if len(errs) == 0 { + return nil + } + return aggregateErr(errs) +} + +// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error. +// Error is part of the error interface. +func (agg aggregateErr) Error() string { + if len(agg) == 0 { + // This should never happen, really. + return "" + } + if len(agg) == 1 { + return agg[0].Error() + } + result := fmt.Sprintf("[%s", agg[0].Error()) + for i := 1; i < len(agg); i++ { + result += fmt.Sprintf(", %s", agg[i].Error()) + } + result += "]" + return result +} + +// REMOVED: aggregateErr.Errors + +// errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid. +// errConfigurationInvalid is a set of errors indicating the configuration is invalid. +type errConfigurationInvalid []error + +var _ error = errConfigurationInvalid{} + +// REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid. + +// newErrConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid. 
+func newErrConfigurationInvalid(errs []error) error {
+ switch len(errs) {
+ case 0:
+ return nil
+ default:
+ return errConfigurationInvalid(errs)
+ }
+}
+
+// Error implements the error interface
+func (e errConfigurationInvalid) Error() string {
+ return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error())
+}
+
+// clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules
+// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config.
+// Callers can put the chain together however they want, but we'd recommend:
+// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath.
+// ExplicitPath is special: if a user specifically requests that a certain file be used, an error is reported if this file is not present.
+type clientConfigLoadingRules struct {
+ Precedence []string
+}
+
+// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
+// Load takes the loading rules and returns a Config object based on the following rules:
+// if the ExplicitPath is set, return the unmerged explicit file;
+// otherwise, return a merged config based on the Precedence slice.
+// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
+// Read errors or files with non-deserializable content produce errors.
+// The first file to set a particular map key wins and that map key's value is never changed.
+// BUT, if you set a struct value that is NOT contained inside of map, the value WILL be changed.
+// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two.
+// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
+// non-conflicting entries from the second file's "red-user" are discarded.
+// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
+// and only absolute file paths are returned.
+func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
+ errlist := []error{}
+
+ kubeConfigFiles := []string{}
+
+ // REMOVED: explicit path support
+ kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
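+ // Illustrative example of the rules documented above (hypothetical paths): with
+ // KUBECONFIG=/a:/b, kubeConfigFiles is ["/a", "/b"]; a cluster named "prod" defined in
+ // both files keeps /a's map entry, and the reverse-order struct merge below likewise
+ // lets /a's non-map field values win.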
+ + kubeconfigs := []*clientcmdConfig{} + // read and cache the config files so that we only look at them once + for _, filename := range kubeConfigFiles { + if len(filename) == 0 { + // no work to do + continue + } + + config, err := loadFromFile(filename) + if os.IsNotExist(err) { + // skip missing files + continue + } + if err != nil { + errlist = append(errlist, errors.Wrapf(err, "loading config file \"%s\"", filename)) + continue + } + + kubeconfigs = append(kubeconfigs, config) + } + + // first merge all of our maps + mapConfig := clientcmdNewConfig() + for _, kubeconfig := range kubeconfigs { + if err := mergo.MergeWithOverwrite(mapConfig, kubeconfig); err != nil { + return nil, err + } + } + + // merge all of the struct values in the reverse order so that priority is given correctly + // errors are not added to the list the second time + nonMapConfig := clientcmdNewConfig() + for i := len(kubeconfigs) - 1; i >= 0; i-- { + kubeconfig := kubeconfigs[i] + if err := mergo.MergeWithOverwrite(nonMapConfig, kubeconfig); err != nil { + return nil, err + } + } + + // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and + // get the values we expect. + config := clientcmdNewConfig() + if err := mergo.MergeWithOverwrite(config, mapConfig); err != nil { + return nil, err + } + if err := mergo.MergeWithOverwrite(config, nonMapConfig); err != nil { + return nil, err + } + + // REMOVED: Possibility to skip this. + if err := resolveLocalPaths(config); err != nil { + errlist = append(errlist, err) + } + + return config, newAggregate(errlist) +} + +// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile +// LoadFromFile takes a filename and deserializes the contents into Config object +func loadFromFile(filename string) (*clientcmdConfig, error) { + kubeconfigBytes, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + config, err := load(kubeconfigBytes) + if err != nil { + return nil, err + } + + // set LocationOfOrigin on every Cluster, User, and Context + for key, obj := range config.AuthInfos { + obj.LocationOfOrigin = filename + config.AuthInfos[key] = obj + } + for key, obj := range config.Clusters { + obj.LocationOfOrigin = filename + config.Clusters[key] = obj + } + for key, obj := range config.Contexts { + obj.LocationOfOrigin = filename + config.Contexts[key] = obj + } + + if config.AuthInfos == nil { + config.AuthInfos = map[string]*clientcmdAuthInfo{} + } + if config.Clusters == nil { + config.Clusters = map[string]*clientcmdCluster{} + } + if config.Contexts == nil { + config.Contexts = map[string]*clientcmdContext{} + } + + return config, nil +} + +// load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load +// Load takes a byte slice and deserializes the contents into Config object. +// Encapsulates deserialization without assuming the source is a file. +func load(data []byte) (*clientcmdConfig, error) { + config := clientcmdNewConfig() + // if there's no data in a file, return the default object instead of failing (DecodeInto reject empty input) + if len(data) == 0 { + return config, nil + } + // Note: This does absolutely no kind/version checking or conversions. 
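+ // Since YAML is a superset of JSON, yaml.YAMLToJSON below also accepts JSON input,
+ // so both YAML and JSON kubeconfig files are handled here.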
+ data, err := yaml.YAMLToJSON(data) + if err != nil { + return nil, err + } + if err := json.Unmarshal(data, config); err != nil { + return nil, err + } + return config, nil +} + +// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths. +// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin +// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without +// modification of its contents. +func resolveLocalPaths(config *clientcmdConfig) error { + for _, cluster := range config.Clusters { + if len(cluster.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) + if err != nil { + return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin) + } + + if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil { + return err + } + } + for _, authInfo := range config.AuthInfos { + if len(authInfo.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) + if err != nil { + return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin) + } + + if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + } + + return nil +} + +// getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences. +func getClusterFileReferences(cluster *clientcmdCluster) []*string { + return []*string{&cluster.CertificateAuthority} +} + +// getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences. +func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string { + return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey} +} + +// resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths. +// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory +func resolvePaths(refs []*string, base string) error { + for _, ref := range refs { + // Don't resolve empty paths + if len(*ref) > 0 { + // Don't resolve absolute paths + if !filepath.IsAbs(*ref) { + *ref = filepath.Join(base, *ref) + } + } + } + return nil +} + +// restClientFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.RESTClientFor. +// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config +// object. Note that a RESTClient may require fields that are optional when initializing a Client. +// A RESTClient created by this method is generic - it expects to operate on an API that follows +// the Kubernetes conventions, but may not be the Kubernetes API. 
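+// (Illustrative note: in this stripped-down copy no RESTClient is actually returned; for a
+// config with a hypothetical Host of "https://api.example.com:6443", the function yields
+// that base URL plus an *http.Client carrying the TLS settings (nil if the default
+// transport suffices), and the caller is expected to set auth headers itself, per the
+// REMOVED notes below.)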
+func restClientFor(config *restConfig) (*url.URL, *http.Client, error) { + // REMOVED: Configurable GroupVersion, Codec + // REMOVED: Configurable versionedAPIPath + baseURL, err := defaultServerURLFor(config) + if err != nil { + return nil, nil, err + } + + transport, err := transportFor(config) + if err != nil { + return nil, nil, err + } + + var httpClient *http.Client + if transport != http.DefaultTransport { + httpClient = &http.Client{Transport: transport} + } + + // REMOVED: Configurable QPS, Burst, ContentConfig + // REMOVED: Actually returning a RESTClient object. + return baseURL, httpClient, nil +} + +// defaultServerURL is a modified copy of k8s.io/kubernetes/pkg/client/restclient.DefaultServerURL. +// DefaultServerURL converts a host, host:port, or URL string to the default base server API path +// to use with a Client at a given API version following the standard conventions for a +// Kubernetes API. +func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) { + if host == "" { + return nil, errors.Errorf("host must be a URL or a host:port pair") + } + base := host + hostURL, err := url.Parse(base) + if err != nil { + return nil, err + } + if hostURL.Scheme == "" { + scheme := "http://" + if defaultTLS { + scheme = "https://" + } + hostURL, err = url.Parse(scheme + base) + if err != nil { + return nil, err + } + if hostURL.Path != "" && hostURL.Path != "/" { + return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base) + } + } + + // REMOVED: versionedAPIPath computation. + return hostURL, nil +} + +// defaultServerURLFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.defaultServerURLFor. +// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It +// requires Host and Version to be set prior to being called. +func defaultServerURLFor(config *restConfig) (*url.URL, error) { + // TODO: move the default to secure when the apiserver supports TLS by default + // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA." + hasCA := len(config.TLSClientConfig.CAFile) != 0 || len(config.TLSClientConfig.CAData) != 0 + hasCert := len(config.TLSClientConfig.CertFile) != 0 || len(config.TLSClientConfig.CertData) != 0 + defaultTLS := hasCA || hasCert || config.Insecure + host := config.Host + if host == "" { + host = "localhost" + } + + // REMOVED: Configurable APIPath, GroupVersion + return defaultServerURL(host, defaultTLS) +} + +// transportFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.transportFor. +// TransportFor returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. Will return the +// default http.DefaultTransport if no special case behavior is needed. +func transportFor(config *restConfig) (http.RoundTripper, error) { + // REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support + return transportNew(config) +} + +// isConfigTransportTLS is a modified copy of k8s.io/kubernetes/pkg/client/restclient.IsConfigTransportTLS. +// IsConfigTransportTLS returns true if and only if the provided +// config will result in a protected connection to the server when it +// is passed to restclient.RESTClientFor(). Use to determine when to +// send credentials over the wire. +// +// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are +// still possible. 
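+//
+// (Illustrative, with a hypothetical host: a Host of "example.com" with no CA/cert data
+// and Insecure unset defaults to http and returns false; supplying CAData, client
+// certificates, or Insecure makes the default scheme https and returns true.)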
+func isConfigTransportTLS(config restConfig) bool { + baseURL, err := defaultServerURLFor(&config) + if err != nil { + return false + } + return baseURL.Scheme == "https" +} + +// transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New. +// New returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. +func transportNew(config *restConfig) (http.RoundTripper, error) { + // REMOVED: custom config.Transport support. + // Set transport level security + + var ( + rt http.RoundTripper + err error + ) + + rt, err = tlsCacheGet(config) + if err != nil { + return nil, err + } + + // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains. + if len(config.Username) != 0 && len(config.BearerToken) != 0 { + return nil, errors.Errorf("username/password or bearer token may be set, but not both") + } + + return rt, nil +} + +// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR. +// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if +// no matching CIDRs are found +func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { + // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it + noProxyEnv := os.Getenv("NO_PROXY") + noProxyRules := strings.Split(noProxyEnv, ",") + + cidrs := []*net.IPNet{} + for _, noProxyRule := range noProxyRules { + _, cidr, _ := net.ParseCIDR(noProxyRule) + if cidr != nil { + cidrs = append(cidrs, cidr) + } + } + + if len(cidrs) == 0 { + return delegate + } + + return func(req *http.Request) (*url.URL, error) { + host := req.URL.Host + // for some urls, the Host is already the host, not the host:port + if net.ParseIP(host) == nil { + var err error + host, _, err = net.SplitHostPort(req.URL.Host) + if err != nil { + return delegate(req) + } + } + + ip := net.ParseIP(host) + if ip == nil { + return delegate(req) + } + + for _, cidr := range cidrs { + if cidr.Contains(ip) { + return nil, nil + } + } + + return delegate(req) + } +} + +// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get. +func tlsCacheGet(config *restConfig) (http.RoundTripper, error) { + // REMOVED: any actual caching + + // Get the TLS options for this client config + tlsConfig, err := tlsConfigFor(config) + if err != nil { + return nil, err + } + // The options didn't require a custom TLS config + if tlsConfig == nil { + return http.DefaultTransport, nil + } + + // REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here. + t := &http.Transport{ + // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings + // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY + Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment), + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + } + // Allow clients to disable http2 if needed. 
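+ // Any non-empty DISABLE_HTTP2 value, even "false" or "0", skips
+ // http2.ConfigureTransport and leaves the transport HTTP/1.1-only.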
+ if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 { + _ = http2.ConfigureTransport(t) + } + return t, nil +} + +// tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor. +// TLSConfigFor returns a tls.Config that will provide the transport level security defined +// by the provided Config. Will return nil if no transport level security is requested. +func tlsConfigFor(c *restConfig) (*tls.Config, error) { + if !(c.HasCA() || c.HasCertAuth() || c.Insecure) { + return nil, nil + } + if c.HasCA() && c.Insecure { + return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed") + } + if err := loadTLSFiles(c); err != nil { + return nil, err + } + + tlsConfig := &tls.Config{ + // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability) + MinVersion: tls.VersionTLS10, + InsecureSkipVerify: c.Insecure, + } + + if c.HasCA() { + tlsConfig.RootCAs = rootCertPool(c.TLSClientConfig.CAData) + } + + if c.HasCertAuth() { + cert, err := tls.X509KeyPair(c.TLSClientConfig.CertData, c.TLSClientConfig.KeyData) + if err != nil { + return nil, err + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + return tlsConfig, nil +} + +// loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles. +// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, +// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are +// either populated or were empty to start. +func loadTLSFiles(c *restConfig) error { + var err error + c.TLSClientConfig.CAData, err = dataFromSliceOrFile(c.TLSClientConfig.CAData, c.TLSClientConfig.CAFile) + if err != nil { + return err + } + + c.TLSClientConfig.CertData, err = dataFromSliceOrFile(c.TLSClientConfig.CertData, c.TLSClientConfig.CertFile) + if err != nil { + return err + } + + c.TLSClientConfig.KeyData, err = dataFromSliceOrFile(c.TLSClientConfig.KeyData, c.TLSClientConfig.KeyFile) + if err != nil { + return err + } + return nil +} + +// dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile. +// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, +// or an error if an error occurred reading the file +func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { + if len(data) > 0 { + return data, nil + } + if len(file) > 0 { + fileData, err := os.ReadFile(file) + if err != nil { + return []byte{}, err + } + return fileData, nil + } + return nil, nil +} + +// rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool. +// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". +// When caData is not empty, it will be the ONLY information used in the CertPool. +func rootCertPool(caData []byte) *x509.CertPool { + // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go + // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values + // It doesn't allow trusting either/or, but hopefully that won't be an issue + if len(caData) == 0 { + return nil + } + + // if we have caData, use it + certPool := x509.NewCertPool() + certPool.AppendCertsFromPEM(caData) + return certPool +} + +// HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA. 
+// HasCA returns whether the configuration has a certificate authority or not. +func (c *restConfig) HasCA() bool { + return len(c.TLSClientConfig.CAData) > 0 || len(c.TLSClientConfig.CAFile) > 0 +} + +// HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth. +// HasCertAuth returns whether the configuration has certificate authentication or not. +func (c *restConfig) HasCertAuth() bool { + return len(c.TLSClientConfig.CertData) != 0 || len(c.TLSClientConfig.CertFile) != 0 +} + +// clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config. +// Config holds the information needed to build connect to remote kubernetes clusters as a given user +// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() +type clientcmdConfig struct { + // Clusters is a map of referenceable names to cluster configs + Clusters clustersMap `json:"clusters"` + // AuthInfos is a map of referenceable names to user configs + AuthInfos authInfosMap `json:"users"` + // Contexts is a map of referenceable names to context configs + Contexts contextsMap `json:"contexts"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `json:"current-context"` +} + +type clustersMap map[string]*clientcmdCluster + +func (m *clustersMap) UnmarshalJSON(data []byte) error { + var a []v1NamedCluster + if err := json.Unmarshal(data, &a); err != nil { + return err + } + for _, e := range a { + cluster := e.Cluster // Allocates a new instance in each iteration + (*m)[e.Name] = &cluster + } + return nil +} + +type authInfosMap map[string]*clientcmdAuthInfo + +func (m *authInfosMap) UnmarshalJSON(data []byte) error { + var a []v1NamedAuthInfo + if err := json.Unmarshal(data, &a); err != nil { + return err + } + for _, e := range a { + authInfo := e.AuthInfo // Allocates a new instance in each iteration + (*m)[e.Name] = &authInfo + } + return nil +} + +type contextsMap map[string]*clientcmdContext + +func (m *contextsMap) UnmarshalJSON(data []byte) error { + var a []v1NamedContext + if err := json.Unmarshal(data, &a); err != nil { + return err + } + for _, e := range a { + context := e.Context // Allocates a new instance in each iteration + (*m)[e.Name] = &context + } + return nil +} + +// clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig. +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func clientcmdNewConfig() *clientcmdConfig { + return &clientcmdConfig{ + Clusters: make(map[string]*clientcmdCluster), + AuthInfos: make(map[string]*clientcmdAuthInfo), + Contexts: make(map[string]*clientcmdContext), + } +} + +// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster. +// Cluster contains information about how to communicate with a kubernetes cluster +type clientcmdCluster struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CertificateAuthority is the path to a cert file for the certificate authority. 
+ CertificateAuthority string `json:"certificate-authority,omitempty"` + // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` +} + +// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo. +// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. +type clientcmdAuthInfo struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // ClientCertificate is the path to a client cert file for TLS. + ClientCertificate string `json:"client-certificate,omitempty"` + // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate + ClientCertificateData []byte `json:"client-certificate-data,omitempty"` + // ClientKey is the path to a client key file for TLS. + ClientKey string `json:"client-key,omitempty"` + // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey + ClientKeyData []byte `json:"client-key-data,omitempty"` + // Token is the bearer token for authentication to the kubernetes cluster. + Token string `json:"token,omitempty"` + // Username is the username for basic authentication to the kubernetes cluster. + Username string `json:"username,omitempty"` + // Password is the password for basic authentication to the kubernetes cluster. + Password string `json:"password,omitempty"` +} + +// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context. +// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) +type clientcmdContext struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // Cluster is the name of the cluster for this context + Cluster string `json:"cluster"` + // AuthInfo is the name of the authInfo for this context + AuthInfo string `json:"user"` + // Namespace is the default namespace to use on unspecified requests + Namespace string `json:"namespace,omitempty"` +} + +// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster. +// NamedCluster relates nicknames to cluster information +type v1NamedCluster struct { + // Name is the nickname for this Cluster + Name string `json:"name"` + // Cluster holds the cluster information + Cluster clientcmdCluster `json:"cluster"` +} + +// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext. +// NamedContext relates nicknames to context information +type v1NamedContext struct { + // Name is the nickname for this Context + Name string `json:"name"` + // Context holds the context information + Context clientcmdContext `json:"context"` +} + +// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo. 
+// NamedAuthInfo relates nicknames to auth information +type v1NamedAuthInfo struct { + // Name is the nickname for this AuthInfo + Name string `json:"name"` + // AuthInfo holds the auth information + AuthInfo clientcmdAuthInfo `json:"user"` +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go new file mode 100644 index 00000000000..67612d800c2 --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -0,0 +1,584 @@ +package openshift + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/image/v5/version" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// openshiftClient is configuration for dealing with a single image stream, for reading or writing. +type openshiftClient struct { + ref openshiftReference + baseURL *url.URL + // Values from Kubernetes configuration + httpClient *http.Client + bearerToken string // "" if not used + username string // "" if not used + password string // if username != "" +} + +// newOpenshiftClient creates a new openshiftClient for the specified reference. +func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { + // We have already done this parsing in ParseReference, but thrown away + // httpClient. So, parse again. + // (We could also rework/split restClientFor to "get base URL" to be done + // in ParseReference, and "get httpClient" to be done here. But until/unless + // we support non-default clusters, this is good enough.) + + // Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client. + cmdConfig := defaultClientConfig() + logrus.Debugf("cmdConfig: %#v", cmdConfig) + restConfig, err := cmdConfig.ClientConfig() + if err != nil { + return nil, err + } + // REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.) + logrus.Debugf("restConfig: %#v", restConfig) + baseURL, httpClient, err := restClientFor(restConfig) + if err != nil { + return nil, err + } + logrus.Debugf("URL: %#v", *baseURL) + + if httpClient == nil { + httpClient = http.DefaultClient + } + + return &openshiftClient{ + ref: ref, + baseURL: baseURL, + httpClient: httpClient, + bearerToken: restConfig.BearerToken, + username: restConfig.Username, + password: restConfig.Password, + }, nil +} + +// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. 
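Before the implementation, a hedged sketch of the header policy doRequest applies: a bearer token takes precedence over basic auth, and Content-Type is set only when a body is actually sent. buildRequest and its parameters are hypothetical stand-ins, not part of the vendored API:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// buildRequest assembles an authenticated JSON API request; bearer wins over
// basic auth, mirroring the precedence in doRequest below.
func buildRequest(method, rawURL, bearer, user, pass string, body []byte) (*http.Request, error) {
	var r io.Reader // stays nil (not a typed nil) when there is no body
	if body != nil {
		r = bytes.NewReader(body)
	}
	req, err := http.NewRequest(method, rawURL, r)
	if err != nil {
		return nil, err
	}
	if bearer != "" {
		req.Header.Set("Authorization", "Bearer "+bearer)
	} else if user != "" {
		req.SetBasicAuth(user, pass)
	}
	req.Header.Set("Accept", "application/json, */*")
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	return req, nil
}

func main() {
	req, err := buildRequest(http.MethodPost, "https://api.example.com/oapi/v1/imagesignatures", "token123", "", "", []byte(`{}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL, req.Header.Get("Authorization"))
}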
+func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { + url := *c.baseURL + url.Path = path + var requestBodyReader io.Reader + if requestBody != nil { + logrus.Debugf("Will send body: %s", requestBody) + requestBodyReader = bytes.NewReader(requestBody) + } + req, err := http.NewRequestWithContext(ctx, method, url.String(), requestBodyReader) + if err != nil { + return nil, err + } + + if len(c.bearerToken) != 0 { + req.Header.Set("Authorization", "Bearer "+c.bearerToken) + } else if len(c.username) != 0 { + req.SetBasicAuth(c.username, c.password) + } + req.Header.Set("Accept", "application/json, */*") + req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version)) + if requestBody != nil { + req.Header.Set("Content-Type", "application/json") + } + + logrus.Debugf("%s %s", method, url.Redacted()) + res, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxOpenShiftStatusBody) + if err != nil { + return nil, err + } + logrus.Debugf("Got body: %s", body) + // FIXME: Just throwing this useful information away only to try to guess later... + logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type")) + + var status status + statusValid := false + if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 { + statusValid = true + } + + switch { + case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient. + if statusValid && status.Status != "Success" { + return nil, errors.New(status.Message) + } + case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent: + // OK. + default: + if statusValid { + return nil, errors.New(status.Message) + } + return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body)) + } + + return body, nil +} + +// getImage loads the specified image object. +func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) { + // FIXME: validate components per validation.IsValidPathSegmentName? + path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName) + body, err := c.doRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + // Note: This does absolutely no kind/version checking or conversions. + var isi imageStreamImage + if err := json.Unmarshal(body, &isi); err != nil { + return nil, err + } + return &isi.Image, nil +} + +// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use; +// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside. 
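A runnable sketch of the rewriting the function below performs; rehome and the sample registry host are illustrative assumptions, but the split-and-rejoin logic matches convertDockerImageReference:

package main

import (
	"fmt"
	"strings"
)

// rehome swaps the registry host of a docker-style reference for an external
// one, keeping the repository path intact.
func rehome(ref, externalDomain string) (string, error) {
	parts := strings.SplitN(ref, "/", 2)
	if len(parts) != 2 {
		return "", fmt.Errorf("invalid docker reference %q: missing '/'", ref)
	}
	return externalDomain + "/" + parts[1], nil
}

func main() {
	// The cluster-internal service IP is replaced, the path is preserved.
	out, err := rehome("172.30.0.1:5000/myns/mystream@sha256:abc", "registry.example.com")
	fmt.Println(out, err)
}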
+func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { + parts := strings.SplitN(ref, "/", 2) + if len(parts) != 2 { + return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref) + } + return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil +} + +type openshiftImageSource struct { + client *openshiftClient + // Values specific to this image + sys *types.SystemContext + // State + docker types.ImageSource // The docker/distribution API endpoint, or nil if not resolved yet + imageStreamImageName string // Resolved image identifier, or "" if not known yet +} + +// newImageSource creates a new ImageSource for the specified reference. +// The caller must call .Close() on the returned ImageSource. +func newImageSource(sys *types.SystemContext, ref openshiftReference) (types.ImageSource, error) { + client, err := newOpenshiftClient(ref) + if err != nil { + return nil, err + } + + return &openshiftImageSource{ + client: client, + sys: sys, + }, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *openshiftImageSource) Reference() types.ImageReference { + return s.client.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *openshiftImageSource) Close() error { + if s.docker != nil { + err := s.docker.Close() + s.docker = nil + + return err + } + + return nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, "", err + } + return s.docker.GetManifest(ctx, instanceDigest) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *openshiftImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, 0, err + } + return s.docker.GetBlob(ctx, info, cache) +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). 
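GetSignatures, defined next, keeps only signatures whose Type matches the "atomic" constant (imageSignatureTypeAtomic, later in this file). A minimal sketch of that filtering, with simplified stand-in types:

package main

import "fmt"

// imageSignature is a pared-down stand-in for the vendored struct.
type imageSignature struct {
	Type    string
	Content []byte
}

// atomicSignatures returns the payloads of atomic-type signatures, in order.
func atomicSignatures(sigs []imageSignature) [][]byte {
	var out [][]byte
	for _, s := range sigs {
		if s.Type == "atomic" {
			out = append(out, s.Content)
		}
	}
	return out
}

func main() {
	sigs := []imageSignature{{Type: "atomic", Content: []byte("sig1")}, {Type: "other"}}
	fmt.Printf("%d atomic signature(s)\n", len(atomicSignatures(sigs)))
}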
+func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + var imageStreamImageName string + if instanceDigest == nil { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, err + } + imageStreamImageName = s.imageStreamImageName + } else { + imageStreamImageName = instanceDigest.String() + } + image, err := s.client.getImage(ctx, imageStreamImageName) + if err != nil { + return nil, err + } + var sigs [][]byte + for _, sig := range image.Signatures { + if sig.Type == imageSignatureTypeAtomic { + sigs = append(sigs, sig.Content) + } + } + return sigs, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} + +// ensureImageIsResolved sets up s.docker and s.imageStreamImageName +func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { + if s.docker != nil { + return nil + } + + // FIXME: validate components per validation.IsValidPathSegmentName? + path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream) + body, err := s.client.doRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return err + } + // Note: This does absolutely no kind/version checking or conversions. + var is imageStream + if err := json.Unmarshal(body, &is); err != nil { + return err + } + var te *tagEvent + for _, tag := range is.Status.Tags { + if tag.Tag != s.client.ref.dockerReference.Tag() { + continue + } + if len(tag.Items) > 0 { + te = &tag.Items[0] + break + } + } + if te == nil { + return errors.Errorf("No matching tag found") + } + logrus.Debugf("tag event %#v", te) + dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference) + if err != nil { + return err + } + logrus.Debugf("Resolved reference %#v", dockerRefString) + dockerRef, err := docker.ParseReference("//" + dockerRefString) + if err != nil { + return err + } + d, err := dockerRef.NewImageSource(ctx, s.sys) + if err != nil { + return err + } + s.docker = d + s.imageStreamImageName = te.Image + return nil +} + +type openshiftImageDestination struct { + client *openshiftClient + docker types.ImageDestination // The docker/distribution API endpoint + // State + imageStreamImageName string // "" if not yet known +} + +// newImageDestination creates a new ImageDestination for the specified reference. +func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) { + client, err := newOpenshiftClient(ref) + if err != nil { + return nil, err + } + + // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match, + // i.e. 
a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know + // the manifest digest at this point. + dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag()) + dockerRef, err := docker.ParseReference(dockerRefString) + if err != nil { + return nil, err + } + docker, err := dockerRef.NewImageDestination(ctx, sys) + if err != nil { + return nil, err + } + + return &openshiftImageDestination{ + client: client, + docker: docker, + }, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *openshiftImageDestination) Reference() types.ImageReference { + return d.client.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *openshiftImageDestination) Close() error { + return d.docker.Close() +} + +func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string { + return d.docker.SupportedManifestMIMETypes() +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *openshiftImageDestination) SupportsSignatures(ctx context.Context) error { + return nil +} + +func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.Compress +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *openshiftImageDestination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool { + return d.docker.IgnoresEmbeddedDockerReference() +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *openshiftImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
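The PutBlob contract above requires verifying the stream digest on the fly. The vendored method simply delegates to the docker destination, but a stdlib-only sketch of the underlying technique, using io.TeeReader and crypto/sha256 (copyVerified is an illustrative name), looks like this:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
)

// copyVerified copies src to dst while hashing the bytes in flight, then
// rejects the copy if the digest does not match the expected value.
func copyVerified(dst io.Writer, src io.Reader, wantHex string) (int64, error) {
	h := sha256.New()
	n, err := io.Copy(dst, io.TeeReader(src, h))
	if err != nil {
		return n, err
	}
	got := fmt.Sprintf("%x", h.Sum(nil))
	if got != wantHex {
		return n, fmt.Errorf("digest mismatch: got sha256:%s, want sha256:%s", got, wantHex)
	}
	return n, nil
}

func main() {
	data := []byte("hello")
	want := fmt.Sprintf("%x", sha256.Sum256(data))
	var buf bytes.Buffer
	n, err := copyVerified(&buf, bytes.NewReader(data), want)
	fmt.Println(n, err)
}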
+func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
+	if instanceDigest == nil {
+		manifestDigest, err := manifest.Digest(m)
+		if err != nil {
+			return err
+		}
+		d.imageStreamImageName = manifestDigest.String()
+	}
+	return d.docker.PutManifest(ctx, m, instanceDigest)
+}
+
+func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+	var imageStreamImageName string
+	if instanceDigest == nil {
+		if d.imageStreamImageName == "" {
+			return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
+		}
+		imageStreamImageName = d.imageStreamImageName
+	} else {
+		imageStreamImageName = instanceDigest.String()
+	}
+
+	// Because image signatures are a shared resource in Atomic Registry, the default upload
+	// always adds signatures. Eventually we should also allow removing signatures.
+
+	if len(signatures) == 0 {
+		return nil // No need to even read the old state.
+	}
+
+	image, err := d.client.getImage(ctx, imageStreamImageName)
+	if err != nil {
+		return err
+	}
+	existingSigNames := map[string]struct{}{}
+	for _, sig := range image.Signatures {
+		existingSigNames[sig.objectMeta.Name] = struct{}{}
+	}
+
+sigExists:
+	for _, newSig := range signatures {
+		for _, existingSig := range image.Signatures {
+			if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
+				continue sigExists
+			}
+		}
+
+		// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
+ var signatureName string + for { + randBytes := make([]byte, 16) + n, err := rand.Read(randBytes) + if err != nil || n != 16 { + return errors.Wrapf(err, "generating random signature len %d", n) + } + signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes) + if _, ok := existingSigNames[signatureName]; !ok { + break + } + } + // Note: This does absolutely no kind/version checking or conversions. + sig := imageSignature{ + typeMeta: typeMeta{ + Kind: "ImageSignature", + APIVersion: "v1", + }, + objectMeta: objectMeta{Name: signatureName}, + Type: imageSignatureTypeAtomic, + Content: newSig, + } + body, err := json.Marshal(sig) + if err != nil { + return err + } + _, err = d.client.doRequest(ctx, http.MethodPost, "/oapi/v1/imagesignatures", body) + if err != nil { + return err + } + } + + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (d *openshiftImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return d.docker.Commit(ctx, unparsedToplevel) +} + +// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies. +type imageStream struct { + Status imageStreamStatus `json:"status,omitempty"` +} +type imageStreamStatus struct { + DockerImageRepository string `json:"dockerImageRepository"` + Tags []namedTagEventList `json:"tags,omitempty"` +} +type namedTagEventList struct { + Tag string `json:"tag"` + Items []tagEvent `json:"items"` +} +type tagEvent struct { + DockerImageReference string `json:"dockerImageReference"` + Image string `json:"image"` +} +type imageStreamImage struct { + Image image `json:"image"` +} +type image struct { + objectMeta `json:"metadata,omitempty"` + DockerImageReference string `json:"dockerImageReference,omitempty"` + // DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"` + DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"` + DockerImageManifest string `json:"dockerImageManifest,omitempty"` + // DockerImageLayers []ImageLayer `json:"dockerImageLayers"` + Signatures []imageSignature `json:"signatures,omitempty"` +} + +const imageSignatureTypeAtomic string = "atomic" + +type imageSignature struct { + typeMeta `json:",inline"` + objectMeta `json:"metadata,omitempty"` + Type string `json:"type"` + Content []byte `json:"content"` + // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // ImageIdentity string `json:"imageIdentity,omitempty"` + // SignedClaims map[string]string `json:"signedClaims,omitempty"` + // Created *unversioned.Time `json:"created,omitempty"` + // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"` + // IssuedTo SignatureSubject `json:"issuedTo,omitempty"` +} +type typeMeta struct { + Kind string `json:"kind,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` +} +type objectMeta struct { + Name 
string `json:"name,omitempty"` + GenerateName string `json:"generateName,omitempty"` + Namespace string `json:"namespace,omitempty"` + SelfLink string `json:"selfLink,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty"` + Generation int64 `json:"generation,omitempty"` + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status +type status struct { + Status string `json:"status,omitempty"` + Message string `json:"message,omitempty"` + // Reason StatusReason `json:"reason,omitempty"` + // Details *StatusDetails `json:"details,omitempty"` + Code int32 `json:"code,omitempty"` +} diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go new file mode 100644 index 00000000000..6bbb43be283 --- /dev/null +++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go @@ -0,0 +1,157 @@ +package openshift + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/containers/image/v5/docker/policyconfiguration" + "github.com/containers/image/v5/docker/reference" + genericImage "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for OpenShift registry-hosted images. +var Transport = openshiftTransport{} + +type openshiftTransport struct{} + +func (t openshiftTransport) Name() string { + return "atomic" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// Note that imageNameRegexp is namespace/stream:tag, this +// is HOSTNAME/namespace/stream:tag or parent prefixes. +// Keep this in sync with imageNameRegexp! +var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$") + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error { + if scopeRegexp.FindStringIndex(scope) == nil { + return errors.Errorf("Invalid scope name %s", scope) + } + return nil +} + +// openshiftReference is an ImageReference for OpenShift images. +type openshiftReference struct { + dockerReference reference.NamedTagged + namespace string // Computed from dockerReference in advance. + stream string // Computed from dockerReference in advance. +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference. 
+func ParseReference(ref string) (types.ImageReference, error) { + r, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse image reference %q", ref) + } + tagged, ok := r.(reference.NamedTagged) + if !ok { + return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref) + } + return NewReference(tagged) +} + +// NewReference returns an OpenShift reference for a reference.NamedTagged +func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) { + r := strings.SplitN(reference.Path(dockerRef), "/", 3) + if len(r) != 2 { + return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'", + reference.FamiliarString(dockerRef)) + } + return openshiftReference{ + namespace: r[0], + stream: r[1], + dockerReference: dockerRef, + }, nil +} + +func (ref openshiftReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref openshiftReference) StringWithinTransport() string { + return reference.FamiliarString(ref.dockerReference) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref openshiftReference) DockerReference() reference.Named { + return ref.dockerReference +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref openshiftReference) PolicyConfigurationIdentity() string { + res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference) + if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference. + panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. 
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
+	return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference)
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+	src, err := newImageSource(sys, ref)
+	if err != nil {
+		return nil, err
+	}
+	return genericImage.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref openshiftReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(ctx, sys, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for atomic: images")
+}
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
new file mode 100644
index 00000000000..011118fa52b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go
@@ -0,0 +1,522 @@
+//go:build containers_image_ostree
+// +build containers_image_ostree
+
+package ostree
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"github.com/containers/image/v5/internal/putblobdigest"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/klauspost/pgzip"
+	"github.com/opencontainers/go-digest"
+	selinux "github.com/opencontainers/selinux/go-selinux"
+	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
+	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux
+// #include <glib.h>
+// #include <glib-object.h>
+// #include <gio/gio.h>
+// #include <stdlib.h>
+// #include <ostree.h>
+// #include <gio/ginputstream.h>
+// #include <selinux/selinux.h>
+// #include <selinux/label.h>
import "C"
+
+type blobToImport struct {
+	Size     int64
+	Digest   digest.Digest
+	BlobPath string
+}
+
+type descriptor struct {
+	Size   int64         `json:"size"`
+	Digest digest.Digest `json:"digest"`
+}
+
+type fsLayersSchema1 struct {
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+type manifestSchema struct {
+	LayersDescriptors []descriptor `json:"layers"`
+	FSLayers          []fsLayersSchema1
`json:"fsLayers"` +} + +type ostreeImageDestination struct { + ref ostreeReference + manifest string + schema manifestSchema + tmpDirPath string + blobs map[string]*blobToImport + digest digest.Digest + signaturesLen int + repo *C.struct_OstreeRepo +} + +// newImageDestination returns an ImageDestination for writing to an existing ostree. +func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) { + tmpDirPath = filepath.Join(tmpDirPath, ref.branchName) + if err := ensureDirectoryExists(tmpDirPath); err != nil { + return nil, err + } + return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil +} + +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. +func (d *ostreeImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *ostreeImageDestination) Close() error { + if d.repo != nil { + C.g_object_unref(C.gpointer(d.repo)) + } + return os.RemoveAll(d.tmpDirPath) +} + +func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string { + return []string{ + manifest.DockerV2Schema2MediaType, + } +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *ostreeImageDestination) SupportsSignatures(ctx context.Context) error { + return nil +} + +// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. +func (d *ostreeImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.PreserveOriginal +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *ostreeImageDestination) MustMatchRuntimeOS() bool { + return true +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (d *ostreeImageDestination) HasThreadSafePutBlob() bool { + return false +} + +// PutBlob writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. 
Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob") + if err != nil { + return types.BlobInfo{}, err + } + + blobPath := filepath.Join(tmpDir, "content") + blobFile, err := os.Create(blobPath) + if err != nil { + return types.BlobInfo{}, err + } + defer blobFile.Close() + + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + size, err := io.Copy(blobFile, stream) + if err != nil { + return types.BlobInfo{}, err + } + blobDigest := digester.Digest() + if inputInfo.Size != -1 && size != inputInfo.Size { + return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + } + if err := blobFile.Sync(); err != nil { + return types.BlobInfo{}, err + } + + hash := blobDigest.Hex() + d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath} + return types.BlobInfo{Digest: blobDigest, Size: size}, nil +} + +func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { + entries, err := os.ReadDir(dir) + if err != nil { + return err + } + + for _, entry := range entries { + fullpath := filepath.Join(dir, entry.Name()) + if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + if err := os.Remove(fullpath); err != nil { + return err + } + continue + } + + info, err := entry.Info() + if err != nil { + return err + } + if selinuxHnd != nil { + relPath, err := filepath.Rel(root, fullpath) + if err != nil { + return err + } + // Handle /exports/hostfs as a special case. Files under this directory are copied to the host, + // thus we benefit from maintaining the same SELinux label they would have on the host as we could + // use hard links instead of copying the files. 
+ relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/")) + + relPathC := C.CString(relPath) + defer C.free(unsafe.Pointer(relPathC)) + var context *C.char + + res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm)) + if int(res) < 0 && err != syscall.ENOENT { + return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath) + } + if int(res) == 0 { + defer C.freecon(context) + fullpathC := C.CString(fullpath) + defer C.free(unsafe.Pointer(fullpathC)) + res, err = C.lsetfilecon_raw(fullpathC, context) + if int(res) < 0 { + return errors.Wrapf(err, "cannot setfilecon_raw %s to %s", fullpath, C.GoString(context)) + } + } + } + + if entry.IsDir() { + if usermode { + if err := os.Chmod(fullpath, info.Mode()|0700); err != nil { + return err + } + } + err = fixFiles(selinuxHnd, root, fullpath, usermode) + if err != nil { + return err + } + } else if usermode && (entry.Type().IsRegular()) { + if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { + return err + } + } + } + + return nil +} + +func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error { + opts := otbuiltin.NewCommitOptions() + opts.AddMetadataString = metadata + opts.Timestamp = time.Now() + // OCI layers have no parent OSTree commit + opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" + _, err := repo.Commit(root, branch, opts) + return err +} + +func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) { + mfz := pgzip.NewWriter(output) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + stream, err := os.OpenFile(file, os.O_RDONLY, 0) + if err != nil { + return "", -1, err + } + defer stream.Close() + + gzReader, err := archive.DecompressStream(stream) + if err != nil { + return "", -1, err + } + defer gzReader.Close() + + its, err := asm.NewInputTarStream(gzReader, metaPacker, nil) + if err != nil { + return "", -1, err + } + + digester := digest.Canonical.Digester() + + written, err := io.Copy(digester.Hash(), its) + if err != nil { + return "", -1, err + } + + return digester.Digest(), written, nil +} + +func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error { + // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. 
+
+	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+	destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
+	if err := ensureDirectoryExists(destinationPath); err != nil {
+		return err
+	}
+	defer func() {
+		os.Remove(blob.BlobPath)
+		os.RemoveAll(destinationPath)
+	}()
+
+	var tarSplitOutput bytes.Buffer
+	uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath)
+	if err != nil {
+		return err
+	}
+
+	if os.Getuid() == 0 {
+		if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
+			return err
+		}
+		if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil {
+			return err
+		}
+	} else {
+		os.MkdirAll(destinationPath, 0755)
+		if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil {
+			return err
+		}
+
+		if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil {
+			return err
+		}
+	}
+	return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size),
+		fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize),
+		fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()),
+		fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))})
+
+}
+
+func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
+	ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
+	destinationPath := filepath.Dir(blob.BlobPath)
+
+	return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
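TryReusingBlob, defined next, reports a hit only if all three docker.* metadata keys are present on the blob's ociimage/<hex> branch. A toy sketch of that lookup chain over a plain map standing in for readMetadata (blobSizeIfReusable is an illustrative name; the key names match the vendored code):

package main

import (
	"fmt"
	"strconv"
)

// blobSizeIfReusable mimics the three-key existence check: any missing key
// means the blob is not reusable; otherwise docker.size is parsed and returned.
func blobSizeIfReusable(meta map[string]string) (int64, bool, error) {
	for _, k := range []string{"docker.uncompressed_digest", "docker.uncompressed_size", "docker.size"} {
		if _, ok := meta[k]; !ok {
			return 0, false, nil
		}
	}
	size, err := strconv.ParseInt(meta["docker.size"], 10, 64)
	if err != nil {
		return 0, false, err
	}
	return size, true, nil
}

func main() {
	meta := map[string]string{
		"docker.uncompressed_digest": "sha256:abc",
		"docker.uncompressed_size":   "123",
		"docker.size":                "77",
	}
	fmt.Println(blobSizeIfReusable(meta))
}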
+func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	if d.repo == nil {
+		repo, err := openRepo(d.ref.repo)
+		if err != nil {
+			return false, types.BlobInfo{}, err
+		}
+		d.repo = repo
+	}
+	branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
+
+	found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
+	if err != nil || !found {
+		return found, types.BlobInfo{}, err
+	}
+
+	found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
+	if err != nil || !found {
+		return found, types.BlobInfo{}, err
+	}
+
+	found, data, err = readMetadata(d.repo, branch, "docker.size")
+	if err != nil || !found {
+		return found, types.BlobInfo{}, err
+	}
+
+	size, err := strconv.ParseInt(data, 10, 64)
+	if err != nil {
+		return false, types.BlobInfo{}, err
+	}
+
+	return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+}
+
+// PutManifest writes manifest to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
+	if instanceDigest != nil {
+		return errors.New(`Manifest lists are not supported by "ostree:"`)
+	}
+
+	d.manifest = string(manifestBlob)
+
+	if err := json.Unmarshal(manifestBlob, &d.schema); err != nil {
+		return err
+	}
+
+	manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath())
+	if err := ensureParentDirectoryExists(manifestPath); err != nil {
+		return err
+	}
+
+	digest, err := manifest.Digest(manifestBlob)
+	if err != nil {
+		return err
+	}
+	d.digest = digest
+
+	return os.WriteFile(manifestPath, manifestBlob, 0644)
+}
+
+// PutSignatures writes signatures to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
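PutSignatures, defined next, fans each signature out to its own file via the reference's signaturePath helper. A runnable sketch of that pattern; sigPath and its signature-%d naming are assumptions for illustration, not necessarily the exact scheme used by ostreeReference:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// sigPath is a hypothetical stand-in for ostreeReference.signaturePath,
// deriving one file path per signature index.
func sigPath(dir string, i int) string {
	return filepath.Join(dir, fmt.Sprintf("signature-%d", i+1))
}

func main() {
	dir, err := os.MkdirTemp("", "sigs")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	signatures := [][]byte{[]byte("sig-a"), []byte("sig-b")}
	for i, sig := range signatures {
		if err := os.WriteFile(sigPath(dir, i), sig, 0644); err != nil {
			panic(err)
		}
	}
	fmt.Printf("wrote %d signature file(s) under %s\n", len(signatures), dir)
}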
+
+// PutSignatures writes signatures to the destination.
+// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
+// there can be no secondary manifests.
+func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+    if instanceDigest != nil {
+        return errors.New(`Manifest lists are not supported by "ostree:"`)
+    }
+
+    path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0))
+    if err := ensureParentDirectoryExists(path); err != nil {
+        return err
+    }
+
+    for i, sig := range signatures {
+        signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
+        if err := os.WriteFile(signaturePath, sig, 0644); err != nil {
+            return err
+        }
+    }
+    d.signaturesLen = len(signatures)
+    return nil
+}
+
+func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) error {
+    runtime.LockOSThread()
+    defer runtime.UnlockOSThread()
+
+    repo, err := otbuiltin.OpenRepo(d.ref.repo)
+    if err != nil {
+        return err
+    }
+
+    _, err = repo.PrepareTransaction()
+    if err != nil {
+        return err
+    }
+
+    var selinuxHnd *C.struct_selabel_handle
+
+    if os.Getuid() == 0 && selinux.GetEnabled() {
+        selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0)
+        if selinuxHnd == nil {
+            return errors.Wrapf(err, "cannot open the SELinux DB")
+        }
+
+        defer C.selabel_close(selinuxHnd)
+    }
+
+    checkLayer := func(hash string) error {
+        blob := d.blobs[hash]
+        // if the blob is not present in d.blobs then it is already stored in OSTree,
+        // and we don't need to import it.
+        if blob == nil {
+            return nil
+        }
+        err := d.importBlob(selinuxHnd, repo, blob)
+        if err != nil {
+            return err
+        }
+
+        delete(d.blobs, hash)
+        return nil
+    }
+    for _, layer := range d.schema.LayersDescriptors {
+        hash := layer.Digest.Hex()
+        if err = checkLayer(hash); err != nil {
+            return err
+        }
+    }
+    for _, layer := range d.schema.FSLayers {
+        hash := layer.BlobSum.Hex()
+        if err = checkLayer(hash); err != nil {
+            return err
+        }
+    }
+
+    // Import the other blobs that are not layers
+    for _, blob := range d.blobs {
+        err := d.importConfig(repo, blob)
+        if err != nil {
+            return err
+        }
+    }
+
+    manifestPath := filepath.Join(d.tmpDirPath, "manifest")
+
+    metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
+        fmt.Sprintf("signatures=%d", d.signaturesLen),
+        fmt.Sprintf("docker.digest=%s", string(d.digest))}
+    if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil {
+        return err
+    }
+
+    _, err = repo.CommitTransaction()
+    return err
+}
+
+func ensureDirectoryExists(path string) error {
+    if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+        if err := os.MkdirAll(path, 0755); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func ensureParentDirectoryExists(path string) error {
+    return ensureDirectoryExists(filepath.Dir(path))
+}
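The image source added next is the read side of the destination above: it resolves the same ociimage/* branches and reads back the commit metadata written by importBlob, importConfig and Commit. As an editor's summary (the constant names below are illustrative; only the string values appear in the code):

    // Commit metadata keys shared by ostree_dest.go and ostree_src.go.
    const (
        keySize               = "docker.size"                // compressed blob size
        keyUncompressedSize   = "docker.uncompressed_size"   // size after decompression
        keyUncompressedDigest = "docker.uncompressed_digest" // digest of the uncompressed layer
        keyTarSplit           = "tarsplit.output"            // base64-encoded tar-split metadata
        keyManifest           = "docker.manifest"            // manifest branch only
        keyManifestDigest     = "docker.digest"              // manifest digest, manifest branch only
        keySignatures         = "signatures"                 // number of stored signatures
    )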
"github.com/vbatts/tar-split/tar/storage" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 +// #include +// #include +// #include +// #include +// #include +// #include +import "C" + +type ostreeImageSource struct { + ref ostreeReference + tmpDir string + repo *C.struct_OstreeRepo + // get the compressed layer by its uncompressed checksum + compressed map[digest.Digest]digest.Digest +} + +// newImageSource returns an ImageSource for reading from an existing directory. +func newImageSource(tmpDir string, ref ostreeReference) (types.ImageSource, error) { + return &ostreeImageSource{ref: ref, tmpDir: tmpDir, compressed: nil}, nil +} + +// Reference returns the reference used to set up this source. +func (s *ostreeImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *ostreeImageSource) Close() error { + if s.repo != nil { + C.g_object_unref(C.gpointer(s.repo)) + } + return nil +} + +func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) { + var metadataKey string + if isCompressed { + metadataKey = "docker.uncompressed_size" + } else { + metadataKey = "docker.size" + } + b := fmt.Sprintf("ociimage/%s", blob) + found, data, err := readMetadata(s.repo, b, metadataKey) + if err != nil || !found { + return 0, err + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getLenSignatures() (int64, error) { + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, data, err := readMetadata(s.repo, b, "signatures") + if err != nil { + return -1, err + } + if !found { + // if 'signatures' is not present, just return 0 signatures. + return 0, nil + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) { + b := fmt.Sprintf("ociimage/%s", blob) + found, out, err := readMetadata(s.repo, b, "tarsplit.output") + if err != nil || !found { + return nil, err + } + return base64.StdEncoding.DecodeString(out) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be non-default instances. 
+func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.New(`Manifest lists are not supported by "ostree:"`) + } + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, "", err + } + s.repo = repo + } + + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, out, err := readMetadata(s.repo, b, "docker.manifest") + if err != nil { + return nil, "", err + } + if !found { + return nil, "", errors.New("manifest not found") + } + m := []byte(out) + return m, manifest.GuessMIMEType(m), nil +} + +func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { + return nil, "", errors.New("manifest lists are not supported by this transport") +} + +func openRepo(path string) (*C.struct_OstreeRepo, error) { + var cerr *C.GError + cpath := C.CString(path) + defer C.free(unsafe.Pointer(cpath)) + pathc := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(pathc)) + repo := C.ostree_repo_new(pathc) + r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) + if !r { + C.g_object_unref(C.gpointer(repo)) + return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + return repo, nil +} + +type ostreePathFileGetter struct { + repo *C.struct_OstreeRepo + parentRoot *C.GFile +} + +type ostreeReader struct { + stream *C.GFileInputStream +} + +func (o ostreeReader) Close() error { + C.g_object_unref(C.gpointer(o.stream)) + return nil +} +func (o ostreeReader) Read(p []byte) (int, error) { + var cerr *C.GError + instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type()) + stream := (*C.GInputStream)(unsafe.Pointer(instanceCast)) + + b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr) + if b == nil { + return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + defer C.g_bytes_unref(b) + + count := int(C.g_bytes_get_size(b)) + if count == 0 { + return 0, io.EOF + } + data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count] + copy(p, data) + return count, nil +} + +func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) { + var cerr *C.GError + var ref *C.char + defer C.free(unsafe.Pointer(ref)) + + cCommit := C.CString(commit) + defer C.free(unsafe.Pointer(cCommit)) + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) { + return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + + if ref == nil { + return false, "", nil + } + + var variant *C.GVariant + if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) { + return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + defer C.g_variant_unref(variant) + if variant != nil { + cKey := C.CString(key) + defer C.free(unsafe.Pointer(cKey)) + + metadata := C.g_variant_get_child_value(variant, 0) + defer C.g_variant_unref(metadata) + + data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil) + if data != nil { + defer C.g_variant_unref(data) + ptr := (*C.char)(C.g_variant_get_string(data, nil)) + val := C.GoString(ptr) + return true, val, nil + } + } + return false, "", nil +} + +func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) { + var cerr *C.GError + var parentRoot *C.GFile + cCommit := 
C.CString(commit)
+    defer C.free(unsafe.Pointer(cCommit))
+    if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) {
+        return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+    }
+
+    C.g_object_ref(C.gpointer(repo))
+
+    return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil
+}
+
+func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
+    var file *C.GFile
+    if strings.HasPrefix(filename, "./") {
+        filename = filename[2:]
+    }
+    cfilename := C.CString(filename)
+    defer C.free(unsafe.Pointer(cfilename))
+
+    file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename))
+
+    var cerr *C.GError
+    stream := C.g_file_read(file, nil, &cerr)
+    if stream == nil {
+        return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
+    }
+
+    return &ostreeReader{stream: stream}, nil
+}
+
+func (o ostreePathFileGetter) Close() {
+    C.g_object_unref(C.gpointer(o.repo))
+    C.g_object_unref(C.gpointer(o.parentRoot))
+}
+
+func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) {
+    getter, err := newOSTreePathFileGetter(s.repo, commit)
+    if err != nil {
+        return nil, err
+    }
+    defer getter.Close()
+
+    return getter.Get(path)
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *ostreeImageSource) HasThreadSafeGetBlob() bool {
+    return false
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+    blob := info.Digest.Hex()
+
+    // Ensure s.compressed is initialized. It is built by LayerInfosForCopy.
+    if s.compressed == nil {
+        _, err := s.LayerInfosForCopy(ctx, nil)
+        if err != nil {
+            return nil, -1, err
+        }
+    }
+    compressedBlob, isCompressed := s.compressed[info.Digest]
+    if isCompressed {
+        blob = compressedBlob.Hex()
+    }
+    branch := fmt.Sprintf("ociimage/%s", blob)
+
+    if s.repo == nil {
+        repo, err := openRepo(s.ref.repo)
+        if err != nil {
+            return nil, 0, err
+        }
+        s.repo = repo
+    }
+
+    layerSize, err := s.getBlobUncompressedSize(blob, isCompressed)
+    if err != nil {
+        return nil, 0, err
+    }
+
+    tarsplit, err := s.getTarSplitData(blob)
+    if err != nil {
+        return nil, 0, err
+    }
+
+    // If tarsplit is nil we are looking at the manifest; return the file in /content directly.
+    if tarsplit == nil {
+        file, err := s.readSingleFile(branch, "/content")
+        if err != nil {
+            return nil, 0, err
+        }
+        return file, layerSize, nil
+    }
+
+    mf := bytes.NewReader(tarsplit)
+    mfz, err := pgzip.NewReader(mf)
+    if err != nil {
+        return nil, 0, err
+    }
+    metaUnpacker := storage.NewJSONUnpacker(mfz)
+
+    getter, err := newOSTreePathFileGetter(s.repo, branch)
+    if err != nil {
+        mfz.Close()
+        return nil, 0, err
+    }
+
+    ots := asm.NewOutputTarStream(getter, metaUnpacker)
+
+    rc := ioutils.NewReadCloserWrapper(ots, func() error {
+        getter.Close()
+        mfz.Close()
+        return ots.Close()
+    })
+    return rc, layerSize, nil
+}
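+
+// Editor's aside, not upstream code: the reassembly trick above in one
+// self-contained sketch — the checked-in ostree tree only stores file
+// payloads, while tarsplit.output preserves the exact tar framing, so
+// combining the two reproduces the original layer tarball bit-for-bit:
+//
+//	func reassemble(getter storage.FileGetter, tarSplit io.Reader) io.ReadCloser {
+//		unpacker := storage.NewJSONUnpacker(tarSplit)   // replay recorded tar headers
+//		return asm.NewOutputTarStream(getter, unpacker) // pull file bodies from getter
+//	}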
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as there can be no secondary manifests.
+func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+    if instanceDigest != nil {
+        return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
+    }
+    lenSignatures, err := s.getLenSignatures()
+    if err != nil {
+        return nil, err
+    }
+    branch := fmt.Sprintf("ociimage/%s", s.ref.branchName)
+
+    if s.repo == nil {
+        repo, err := openRepo(s.ref.repo)
+        if err != nil {
+            return nil, err
+        }
+        s.repo = repo
+    }
+
+    signatures := [][]byte{}
+    for i := int64(1); i <= lenSignatures; i++ {
+        sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i))
+        if err != nil {
+            return nil, err
+        }
+
+        sig, err := os.ReadAll(sigReader)
+        sigReader.Close()
+        if err != nil {
+            return nil, err
+        }
+        signatures = append(signatures, sig)
+    }
+    return signatures, nil
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
+// as the primary manifest can not be a list, so there can be no secondary manifests.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+    if instanceDigest != nil {
+        return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
+    }
+
+    updatedBlobInfos := []types.BlobInfo{}
+    manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
+    if err != nil {
+        return nil, err
+    }
+
+    man, err := manifest.FromBlob(manifestBlob, manifestType)
+    if err != nil {
+        return nil, err
+    }
+
+    s.compressed = make(map[digest.Digest]digest.Digest)
+
+    layerBlobs := man.LayerInfos()
+
+    for _, layerBlob := range layerBlobs {
+        branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
+        found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
+        if err != nil || !found {
+            return nil, err
+        }
+
+        found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size")
+        if err != nil || !found {
+            return nil, err
+        }
+
+        uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64)
+        if err != nil {
+            return nil, err
+        }
+        uncompressedDigest := digest.Digest(uncompressedDigestStr)
+        blobInfo := types.BlobInfo{
+            Digest:    uncompressedDigest,
+            Size:      uncompressedSize,
+            MediaType: layerBlob.MediaType,
+        }
+        s.compressed[uncompressedDigest] = layerBlob.Digest
+        updatedBlobInfos = append(updatedBlobInfos, blobInfo)
+    }
+    return updatedBlobInfos, nil
+}
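A note on the read path just completed: LayerInfosForCopy advertises the uncompressed layer digests, and GetBlob maps those back to the stored compressed blobs through s.compressed. A hedged sketch of the calling sequence a copier is expected to follow (the driver code below is hypothetical; LayerInfosForCopy and GetBlob are the real methods):

    // infos are keyed by *uncompressed* digest after this call.
    infos, err := src.LayerInfosForCopy(ctx, nil)
    if err != nil {
        return err
    }
    for _, info := range infos {
        // GetBlob consults s.compressed to find the matching compressed
        // branch, then reassembles the layer tarball via tar-split.
        rc, size, err := src.GetBlob(ctx, info, cache)
        if err != nil {
            return err
        }
        defer rc.Close()
        _ = size // stream rc to the destination here
    }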
"github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +const defaultOSTreeRepo = "/ostree/repo" + +// Transport is an ImageTransport for ostree paths. +var Transport = ostreeTransport{} + +type ostreeTransport struct{} + +func (t ostreeTransport) Name() string { + return "ostree" +} + +func init() { + transports.Register(Transport) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error { + sep := strings.Index(scope, ":") + if sep < 0 { + return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope) + } + repo := scope[:sep] + + if !strings.HasPrefix(repo, "/") { + return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope) + } + cleaned := filepath.Clean(repo) + if cleaned != repo { + return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + } + + // FIXME? In the namespaces within a repo, + // we could be verifying the various character set and length restrictions + // from docker/distribution/reference.regexp.go, but other than that there + // are few semantically invalid strings. + return nil +} + +// ostreeReference is an ImageReference for ostree paths. +type ostreeReference struct { + image string + branchName string + repo string +} + +type ostreeImageCloser struct { + types.ImageCloser + size int64 +} + +func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) { + var repo = "" + var image = "" + s := strings.SplitN(ref, "@/", 2) + if len(s) == 1 { + image, repo = s[0], defaultOSTreeRepo + } else { + image, repo = s[0], "/"+s[1] + } + + return NewReference(image, repo) +} + +// NewReference returns an OSTree reference for a specified repo and image. +func NewReference(image string, repo string) (types.ImageReference, error) { + // image is not _really_ in a containers/image/docker/reference format; + // as far as the libOSTree ociimage/* namespace is concerned, it is more or + // less an arbitrary string with an implied tag. + // Parse the image using reference.ParseNormalizedNamed so that we can + // check whether the images has a tag specified and we can add ":latest" if needed + ostreeImage, err := reference.ParseNormalizedNamed(image) + if err != nil { + return nil, err + } + + if reference.IsNameOnly(ostreeImage) { + image = image + ":latest" + } + + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo) + if err != nil { + // With os.IsNotExist(err), the parent directory of repo is also not existent; + // that should ordinarily not happen, but it would be a bit weird to reject + // references which do not specify a repo just because the implicit defaultOSTreeRepo + // does not exist. 
+ if os.IsNotExist(err) && repo == defaultOSTreeRepo { + resolved = repo + } else { + return nil, err + } + } + // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces + // from being ambiguous with values of PolicyConfigurationIdentity. + if strings.Contains(resolved, ":") { + return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved) + } + + return ostreeReference{ + image: image, + branchName: encodeOStreeRef(image), + repo: resolved, + }, nil +} + +func (ref ostreeReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. +func (ref ostreeReference) StringWithinTransport() string { + return fmt.Sprintf("%s@%s", ref.image, ref.repo) +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref ostreeReference) DockerReference() reference.Named { + return nil +} + +func (ref ostreeReference) PolicyConfigurationIdentity() string { + return fmt.Sprintf("%s:%s", ref.repo, ref.image) +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref ostreeReference) PolicyConfigurationNamespaces() []string { + s := strings.SplitN(ref.image, ":", 2) + if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag. + panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image)) + } + name := s[0] + res := []string{} + for { + res = append(res, fmt.Sprintf("%s:%s", ref.repo, name)) + + lastSlash := strings.LastIndex(name, "/") + if lastSlash == -1 { + break + } + name = name[:lastSlash] + } + return res +} + +func (s *ostreeImageCloser) Size() (int64, error) { + return s.size, nil +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. 
+func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+    var tmpDir string
+    if sys == nil || sys.OSTreeTmpDirPath == "" {
+        tmpDir = os.TempDir()
+    } else {
+        tmpDir = sys.OSTreeTmpDirPath
+    }
+    src, err := newImageSource(tmpDir, ref)
+    if err != nil {
+        return nil, err
+    }
+    return image.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ostreeReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+    var tmpDir string
+    if sys == nil || sys.OSTreeTmpDirPath == "" {
+        tmpDir = os.TempDir()
+    } else {
+        tmpDir = sys.OSTreeTmpDirPath
+    }
+    return newImageSource(tmpDir, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+    var tmpDir string
+    if sys == nil || sys.OSTreeTmpDirPath == "" {
+        tmpDir = os.TempDir()
+    } else {
+        tmpDir = sys.OSTreeTmpDirPath
+    }
+    return newImageDestination(ref, tmpDir)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+    return errors.Errorf("Deleting images not implemented for ostree: images")
+}
+
+var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`)
+
+func encodeOStreeRef(in string) string {
+    var buffer bytes.Buffer
+    for i := range in {
+        sub := in[i : i+1]
+        if ostreeRefRegexp.MatchString(sub) {
+            buffer.WriteString(sub)
+        } else {
+            buffer.WriteString(fmt.Sprintf("_%02X", sub[0]))
+        }
+    }
+    return buffer.String()
+}
+
+// manifestPath returns a path for the manifest within an ostree repository using our conventions.
+func (ref ostreeReference) manifestPath() string {
+    return filepath.Join("manifest", "manifest.json")
+}
+
+// signaturePath returns a path for a signature within an ostree repository using our conventions.
+func (ref ostreeReference) signaturePath(index int) string {
+    return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1))
+}
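For reference, encodeOStreeRef above escapes every byte outside [A-Za-z0-9.-] as "_XX" (two uppercase hex digits), which keeps branch names within ostree's ref character set. Two illustrative mappings (editor's examples derived from the code, not upstream test data):

    encodeOStreeRef("busybox:latest")            // "busybox_3Alatest"
    encodeOStreeRef("docker.io/library/busybox") // "docker.io_2Flibrary_2Fbusybox"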
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
index c28e8179296..34c90dd77e4 100644
--- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go
+++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go
@@ -5,7 +5,6 @@ import (
 	"compress/bzip2"
 	"fmt"
 	"io"
-	"io/ioutil"
 
 	"github.com/containers/image/v5/pkg/compression/internal"
 	"github.com/containers/image/v5/pkg/compression/types"
@@ -65,7 +64,7 @@ func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
 
 // Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
 func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
-	return ioutil.NopCloser(bzip2.NewReader(r)), nil
+	return io.NopCloser(bzip2.NewReader(r)), nil
 }
 
 // XzDecompressor is a DecompressorFunc for the xz compression algorithm.
@@ -74,7 +73,7 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
 	if err != nil {
 		return nil, err
 	}
-	return ioutil.NopCloser(r), nil
+	return io.NopCloser(r), nil
 }
 
 // gzipCompressor is a CompressorFunc for the gzip compression algorithm.
@@ -161,7 +160,7 @@ func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
 			return nil, false, errors.Wrapf(err, "initializing decompression")
 		}
 	} else {
-		res = ioutil.NopCloser(stream)
+		res = io.NopCloser(stream)
 	}
 	return res, decompressor != nil, nil
 }
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index 1d73dc405e7..d0bdd08e9a7 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -4,7 +4,6 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -15,6 +14,7 @@ import (
 	"github.com/containers/image/v5/pkg/sysregistriesv2"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage/pkg/homedir"
+	"github.com/containers/storage/pkg/ioutils"
 	helperclient "github.com/docker/docker-credential-helpers/client"
 	"github.com/docker/docker-credential-helpers/credentials"
 	"github.com/hashicorp/go-multierror"
@@ -543,7 +543,7 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, error) {
 func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
 	var auths dockerConfigFile
 
-	raw, err := ioutil.ReadFile(path)
+	raw, err := os.ReadFile(path)
 	if err != nil {
 		if os.IsNotExist(err) {
 			auths.AuthConfigs = map[string]dockerAuthConfig{}
@@ -605,7 +605,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (
 		return "", errors.Wrapf(err, "marshaling JSON %q", path)
 	}
 
-	if err = ioutil.WriteFile(path, newData, 0600); err != nil {
+	if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil {
 		return "", errors.Wrapf(err, "writing to file %q", path)
 	}
 }
diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
new file mode 100644
index 00000000000..46c10ff631a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
@@ -0,0 +1,477 @@
+package shortnames
+
+import (
+    "fmt"
+    "os"
+    "strings"
+
+    "github.com/containers/image/v5/docker/reference"
+    "github.com/containers/image/v5/pkg/sysregistriesv2"
+    "github.com/containers/image/v5/types"
+    "github.com/manifoldco/promptui"
+    "github.com/opencontainers/go-digest"
+    "github.com/pkg/errors"
+    "golang.org/x/term"
+)
+
+// IsShortName returns true if the specified input is a "short name". A "short
+// name" refers to a container image without a fully-qualified reference, and
+// is hence missing a registry (or domain). Names including a digest are not
+// short names.
+//
+// Examples:
+// * short names: "image:tag", "library/fedora"
+// * not short names: "quay.io/image", "localhost/image:tag",
+//   "server.org:5000/lib/image", "image@sha256:..."
+func IsShortName(input string) bool {
+    isShort, _, _ := parseUnnormalizedShortName(input)
+    return isShort
+}
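+
+// Editor's examples (derived from the rules below, not upstream test data):
+//
+//	IsShortName("fedora")                // true:  no registry part at all
+//	IsShortName("library/fedora:36")     // true:  "library" is not a registry
+//	IsShortName("quay.io/podman/stable") // false: dotted domain
+//	IsShortName("localhost/test:latest") // false: "localhost" counts as a registry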
+
+// parseUnnormalizedShortName parses the input and returns whether it is a short
+// name, the unnormalized reference.Named, and a parsing error.
+func parseUnnormalizedShortName(input string) (bool, reference.Named, error) {
+    ref, err := reference.Parse(input)
+    if err != nil {
+        return false, nil, errors.Wrapf(err, "cannot parse input: %q", input)
+    }
+
+    named, ok := ref.(reference.Named)
+    if !ok {
+        return true, nil, errors.Errorf("%q is not a named reference", input)
+    }
+
+    registry := reference.Domain(named)
+    if strings.ContainsAny(registry, ".:") || registry == "localhost" {
+        // A final parse to make sure that docker.io references are correctly
+        // normalized (e.g., docker.io/alpine to docker.io/library/alpine).
+        named, err = reference.ParseNormalizedNamed(input)
+        if err != nil {
+            return false, nil, errors.Wrapf(err, "cannot normalize input: %q", input)
+        }
+        return false, named, nil
+    }
+
+    return true, named, nil
+}
+
+// splitUserInput parses the user-specified reference. Namely, it strips off
+// the tag or digest and stores them in the return values so that both can be
+// re-added to a resolved alias or unqualified-search result at a later point.
+func splitUserInput(named reference.Named) (isTagged bool, isDigested bool, normalized reference.Named, tag string, digest digest.Digest) {
+    normalized = named
+
+    tagged, isT := named.(reference.NamedTagged)
+    if isT {
+        isTagged = true
+        tag = tagged.Tag()
+    }
+
+    digested, isD := named.(reference.Digested)
+    if isD {
+        isDigested = true
+        digest = digested.Digest()
+    }
+
+    // Strip off tag/digest if present.
+    normalized = reference.TrimNamed(named)
+
+    return
+}
+
+// Add records the specified name-value pair as a new short-name alias to the
+// user-specific aliases.conf. It may override an existing alias for `name`.
+func Add(ctx *types.SystemContext, name string, value reference.Named) error {
+    isShort, _, err := parseUnnormalizedShortName(name)
+    if err != nil {
+        return err
+    }
+    if !isShort {
+        return errors.Errorf("%q is not a short name", name)
+    }
+    return sysregistriesv2.AddShortNameAlias(ctx, name, value.String())
+}
+
+// Remove clears the short-name alias for the specified name. It returns an
+// error if the name does not exist in the machine-generated
+// short-name-alias.conf. In that case, the alias must be specified in one of
+// the registries.conf files, which is the users' responsibility.
+func Remove(ctx *types.SystemContext, name string) error {
+    isShort, _, err := parseUnnormalizedShortName(name)
+    if err != nil {
+        return err
+    }
+    if !isShort {
+        return errors.Errorf("%q is not a short name", name)
+    }
+    return sysregistriesv2.RemoveShortNameAlias(ctx, name)
+}
+
+// Resolved encapsulates all data for a resolved image name.
+type Resolved struct {
+    PullCandidates []PullCandidate
+
+    userInput         reference.Named
+    systemContext     *types.SystemContext
+    rationale         rationale
+    originDescription string
+}
+
+func (r *Resolved) addCandidate(named reference.Named) {
+    named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed
+    r.PullCandidates = append(r.PullCandidates, PullCandidate{named, false, r})
+}
+
+func (r *Resolved) addCandidateToRecord(named reference.Named) {
+    r.PullCandidates = append(r.PullCandidates, PullCandidate{named, true, r})
+}
+
+// rationale allows reasoning over pull errors and adding some context
+// information. Used in (*Resolved).WrapPullError.
+type rationale int
+
+const (
+    // No additional context.
+    rationaleNone rationale = iota
+    // Resolved value is a short-name alias.
+    rationaleAlias
+    // Resolved value has been completed with an Unqualified Search Registry.
+    rationaleUSR
+    // Resolved value has been selected by the user (via the prompt).
+    rationaleUserSelection
+    // Resolved value has been enforced to use Docker Hub (via SystemContext).
+    rationaleEnforcedDockerHub
+)
+
+// Description returns a human-readable description about the resolution
+// process (e.g., short-name alias, unqualified-search registries, etc.).
+// It is meant to be printed before attempting to pull the pull candidates
+// to make the short-name resolution more transparent to the user.
+//
+// If the returned string is empty, it is not meant to be printed.
+func (r *Resolved) Description() string {
+    switch r.rationale {
+    case rationaleAlias:
+        return fmt.Sprintf("Resolved %q as an alias (%s)", r.userInput, r.originDescription)
+    case rationaleUSR:
+        return fmt.Sprintf("Resolving %q using unqualified-search registries (%s)", r.userInput, r.originDescription)
+    case rationaleEnforcedDockerHub:
+        return fmt.Sprintf("Resolving %q to docker.io (%s)", r.userInput, r.originDescription)
+    case rationaleUserSelection, rationaleNone:
+        fallthrough
+    default:
+        return ""
+    }
+}
+
+// FormatPullErrors is a convenience function to format errors that occurred
+// while trying to pull all of the resolved pull candidates.
+//
+// Note that nil is returned if len(pullErrors) == 0. Otherwise, the amount of
+// pull errors must equal the amount of pull candidates.
+func (r *Resolved) FormatPullErrors(pullErrors []error) error {
+    if len(pullErrors) > 0 && len(pullErrors) != len(r.PullCandidates) {
+        pullErrors = append(pullErrors,
+            errors.Errorf("internal error: expected %d instead of %d errors for %d pull candidates",
+                len(r.PullCandidates), len(pullErrors), len(r.PullCandidates)))
+    }
+
+    switch len(pullErrors) {
+    case 0:
+        return nil
+    case 1:
+        return pullErrors[0]
+    default:
+        var sb strings.Builder
+        sb.WriteString(fmt.Sprintf("%d errors occurred while pulling:", len(pullErrors)))
+        for _, e := range pullErrors {
+            sb.WriteString("\n * ")
+            sb.WriteString(e.Error())
+        }
+        return errors.New(sb.String())
+    }
+}
+
+// PullCandidate is a resolved name. Once the Value has been used
+// successfully, users MUST call `(*PullCandidate).Record(..)` to possibly
+// record it as a new short-name alias.
+type PullCandidate struct {
+    // Fully-qualified reference with tag or digest.
+    Value reference.Named
+    // Control whether to record it permanently as an alias.
+    record bool
+
+    // Backwards pointer to the Resolved "parent".
+    resolved *Resolved
+}
+
+// Record may store a short-name alias for the PullCandidate.
+func (c *PullCandidate) Record() error {
+    if !c.record {
+        return nil
+    }
+
+    // Strip off tags/digests from name/value.
+    name := reference.TrimNamed(c.resolved.userInput)
+    value := reference.TrimNamed(c.Value)
+
+    if err := Add(c.resolved.systemContext, name.String(), value); err != nil {
+        return errors.Wrapf(err, "recording short-name alias (%q=%q)", c.resolved.userInput, c.Value)
+    }
+    return nil
+}
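+
+// Editor's aside, not upstream code: how the pieces above are meant to fit
+// together in a caller. pullImage is hypothetical; Resolve, Description,
+// Record and FormatPullErrors are the real API (see Resolve below):
+//
+//	resolved, err := shortnames.Resolve(sysCtx, userInput)
+//	if err != nil {
+//		return err
+//	}
+//	if desc := resolved.Description(); desc != "" {
+//		fmt.Println(desc)
+//	}
+//	var pullErrors []error
+//	for _, candidate := range resolved.PullCandidates {
+//		if err := pullImage(ctx, candidate.Value); err != nil {
+//			pullErrors = append(pullErrors, err)
+//			continue
+//		}
+//		return candidate.Record() // may persist a new short-name alias
+//	}
+//	return resolved.FormatPullErrors(pullErrors)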
+
+// Resolve resolves the specified name to either one or more fully-qualified
+// image references that the short name may be *pulled* from. If the specified
+// name is already a fully-qualified reference (i.e., not a short name), it is
+// returned as is. In case it's a short name, it's resolved according to the
+// ShortNameMode in the SystemContext (if specified) or in the registries.conf.
+//
+// Note that tags and digests are stripped from the specified name before
+// looking up an alias. Stripped off tags and digests are later on appended to
+// all candidates. If neither tag nor digest is specified, candidates are
+// normalized with the "latest" tag. An error is returned if there is no
+// matching alias and no unqualified-search registries are configured.
+//
+// Note that callers *must* call `(PullCandidate).Record` after a returned
+// item has been pulled successfully; this callback will record a new
+// short-name alias (depending on the specified short-name mode).
+//
+// Furthermore, before attempting to pull callers *should* call
+// `(Resolved).Description` and afterwards use
+// `(Resolved).FormatPullErrors` in case of pull errors.
+func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
+    resolved := &Resolved{}
+
+    // Create a copy of the system context to make it usable beyond this
+    // function call.
+    if ctx != nil {
+        copy := *ctx
+        ctx = &copy
+    }
+    resolved.systemContext = ctx
+
+    // Detect which mode we're running in.
+    mode, err := sysregistriesv2.GetShortNameMode(ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    // Sanity check the short-name mode.
+    switch mode {
+    case types.ShortNameModeDisabled, types.ShortNameModePermissive, types.ShortNameModeEnforcing:
+        // We're good.
+    default:
+        return nil, errors.Errorf("unsupported short-name mode (%v)", mode)
+    }
+
+    isShort, shortRef, err := parseUnnormalizedShortName(name)
+    if err != nil {
+        return nil, err
+    }
+    if !isShort { // no short name
+        resolved.addCandidate(shortRef)
+        return resolved, nil
+    }
+
+    // Resolve to docker.io only if enforced by the caller (e.g., Podman's
+    // Docker-compatible REST API).
+    if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub {
+        named, err := reference.ParseNormalizedNamed(name)
+        if err != nil {
+            return nil, errors.Wrapf(err, "cannot normalize input: %q", name)
+        }
+        resolved.addCandidate(named)
+        resolved.rationale = rationaleEnforcedDockerHub
+        resolved.originDescription = "enforced by caller"
+        return resolved, nil
+    }
+
+    // Strip off the tag to normalize the short name for looking it up in
+    // the config files.
+    isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef)
+    resolved.userInput = shortNameRepo
+
+    // If there's already an alias, use it.
+    namedAlias, aliasOriginDescription, err := sysregistriesv2.ResolveShortNameAlias(ctx, shortNameRepo.String())
+    if err != nil {
+        return nil, err
+    }
+
+    // Always use an alias if present.
+    if namedAlias != nil {
+        if isTagged {
+            namedAlias, err = reference.WithTag(namedAlias, tag)
+            if err != nil {
+                return nil, err
+            }
+        }
+        if isDigested {
+            namedAlias, err = reference.WithDigest(namedAlias, digest)
+            if err != nil {
+                return nil, err
+            }
+        }
+        resolved.addCandidate(namedAlias)
+        resolved.rationale = rationaleAlias
+        resolved.originDescription = aliasOriginDescription
+        return resolved, nil
+    }
+
+    resolved.rationale = rationaleUSR
+
+    // Query the registries.conf configuration for unqualified-search registries.
+    unqualifiedSearchRegistries, usrConfig, err := sysregistriesv2.UnqualifiedSearchRegistriesWithOrigin(ctx)
+    if err != nil {
+        return nil, err
+    }
+    // Error out if there's no matching alias and no search registries.
+ if len(unqualifiedSearchRegistries) == 0 { + if usrConfig != "" { + return nil, errors.Errorf("short-name %q did not resolve to an alias and no unqualified-search registries are defined in %q", name, usrConfig) + } + return nil, errors.Errorf("short-name %q did not resolve to an alias and no containers-registries.conf(5) was found", name) + } + resolved.originDescription = usrConfig + + for _, reg := range unqualifiedSearchRegistries { + named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) + if err != nil { + return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) + } + resolved.addCandidate(named) + } + + // If we're running in disabled, return the candidates without + // prompting (and without recording). + if mode == types.ShortNameModeDisabled { + return resolved, nil + } + + // If we have only one candidate, there's no ambiguity. + if len(resolved.PullCandidates) == 1 { + return resolved, nil + } + + // If we don't have a TTY, act according to the mode. + if !term.IsTerminal(int(os.Stdout.Fd())) || !term.IsTerminal(int(os.Stdin.Fd())) { + switch mode { + case types.ShortNameModePermissive: + // Permissive falls back to using all candidates. + return resolved, nil + case types.ShortNameModeEnforcing: + // Enforcing errors out without a prompt. + return nil, errors.New("short-name resolution enforced but cannot prompt without a TTY") + default: + // We should not end up here. + return nil, errors.Errorf("unexpected short-name mode (%v) during resolution", mode) + } + } + + // We have a TTY, and can prompt the user with a selection of all + // possible candidates. + strCandidates := []string{} + for _, candidate := range resolved.PullCandidates { + strCandidates = append(strCandidates, candidate.Value.String()) + } + prompt := promptui.Select{ + Label: "Please select an image", + Items: strCandidates, + HideHelp: true, // do not show navigation help + } + + _, selection, err := prompt.Run() + if err != nil { + return nil, err + } + + named, err := reference.ParseNormalizedNamed(selection) + if err != nil { + return nil, errors.Wrapf(err, "selection %q is not a valid reference", selection) + } + + resolved.PullCandidates = nil + resolved.addCandidateToRecord(named) + resolved.rationale = rationaleUserSelection + + return resolved, nil +} + +// ResolveLocally resolves the specified name to either one or more local +// images. If the specified name is already a fully-qualified reference (i.e., +// not a short name), it is returned as is. In case, it's a short name, the +// returned slice of named references looks as follows: +// +// 1) If present, the short-name alias +// 2) "localhost/" as used by many container engines such as Podman and Buildah +// 3) Unqualified-search registries from the registries.conf files +// +// Note that tags and digests are stripped from the specified name before +// looking up an alias. Stripped off tags and digests are later on appended to +// all candidates. If neither tag nor digest is specified, candidates are +// normalized with the "latest" tag. The returned slice contains at least one +// item. 
+func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, error) { + isShort, shortRef, err := parseUnnormalizedShortName(name) + if err != nil { + return nil, err + } + if !isShort { // no short name + named := reference.TagNameOnly(shortRef) // Make sure to add ":latest" if needed + return []reference.Named{named}, nil + } + + var candidates []reference.Named + + // Complete the candidates with the specified registries. + completeCandidates := func(registries []string) ([]reference.Named, error) { + for _, reg := range registries { + named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) + if err != nil { + return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) + } + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed + candidates = append(candidates, named) + } + return candidates, nil + } + + if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub { + return completeCandidates([]string{"docker.io"}) + } + + // Strip off the tag to normalize the short name for looking it up in + // the config files. + isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef) + + // If there's already an alias, use it. + namedAlias, _, err := sysregistriesv2.ResolveShortNameAlias(ctx, shortNameRepo.String()) + if err != nil { + return nil, err + } + if namedAlias != nil { + if isTagged { + namedAlias, err = reference.WithTag(namedAlias, tag) + if err != nil { + return nil, err + } + } + if isDigested { + namedAlias, err = reference.WithDigest(namedAlias, digest) + if err != nil { + return nil, err + } + } + namedAlias = reference.TagNameOnly(namedAlias) // Make sure to add ":latest" if needed + candidates = append(candidates, namedAlias) + } + + // Query the registry for unqualified-search registries. + unqualifiedSearchRegistries, err := sysregistriesv2.UnqualifiedSearchRegistries(ctx) + if err != nil { + return nil, err + } + + // Note that "localhost" has precedence over the unqualified-search registries. 
+ return completeCandidates(append([]string{"localhost"}, unqualifiedSearchRegistries...)) +} diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index c5df241b70e..c1753c8457a 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -2,6 +2,7 @@ package sysregistriesv2 import ( "fmt" + "io/fs" "os" "path/filepath" "reflect" @@ -643,17 +644,17 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) { dirPaths = append(dirPaths, wrapper.userConfigDirPath) } for _, dirPath := range dirPaths { - err := filepath.Walk(dirPath, + err := filepath.WalkDir(dirPath, // WalkFunc to read additional configs - func(path string, info os.FileInfo, err error) error { + func(path string, d fs.DirEntry, err error) error { switch { case err != nil: // return error (could be a permission problem) return err - case info == nil: + case d == nil: // this should only happen when err != nil but let's be sure return nil - case info.IsDir(): + case d.IsDir(): if path != dirPath { // make sure to not recurse into sub-directories return filepath.SkipDir diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go index 7e2142b1f58..c766417d0ec 100644 --- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -2,7 +2,6 @@ package tlsclientconfig import ( "crypto/tls" - "io/ioutil" "net" "net/http" "os" @@ -19,7 +18,7 @@ import ( // SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc func SetupCertificates(dir string, tlsc *tls.Config) error { logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := ioutil.ReadDir(dir) + fs, err := os.ReadDir(dir) if err != nil { if os.IsNotExist(err) { return nil @@ -35,7 +34,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { fullPath := filepath.Join(dir, f.Name()) if strings.HasSuffix(f.Name(), ".crt") { logrus.Debugf(" crt: %s", fullPath) - data, err := ioutil.ReadFile(fullPath) + data, err := os.ReadFile(fullPath) if err != nil { if os.IsNotExist(err) { // Dangling symbolic link? @@ -81,7 +80,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { return nil } -func hasFile(files []os.FileInfo, name string) bool { +func hasFile(files []os.DirEntry, name string) bool { for _, f := range files { if f.Name() == name { return true diff --git a/vendor/github.com/containers/image/v5/sif/load.go b/vendor/github.com/containers/image/v5/sif/load.go new file mode 100644 index 00000000000..70758ad4399 --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/load.go @@ -0,0 +1,210 @@ +package sif + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" + "github.com/sylabs/sif/v2/pkg/sif" +) + +// injectedScriptTargetPath is the path injectedScript should be written to in the created image. +const injectedScriptTargetPath = "/podman/runscript" + +// parseDefFile parses a SIF definition file from reader, +// and returns non-trivial contents of the %environment and %runscript sections. 
+func parseDefFile(reader io.Reader) ([]string, []string, error) { + type parserState int + const ( + parsingOther parserState = iota + parsingEnvironment + parsingRunscript + ) + + environment := []string{} + runscript := []string{} + + state := parsingOther + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + s := strings.TrimSpace(scanner.Text()) + switch { + case s == `%environment`: + state = parsingEnvironment + case s == `%runscript`: + state = parsingRunscript + case strings.HasPrefix(s, "%"): + state = parsingOther + case state == parsingEnvironment: + if s != "" && !strings.HasPrefix(s, "#") { + environment = append(environment, s) + } + case state == parsingRunscript: + runscript = append(runscript, s) + default: // parsingOther: ignore the line + } + } + if err := scanner.Err(); err != nil { + return nil, nil, fmt.Errorf("reading lines from SIF definition file object: %w", err) + } + return environment, runscript, nil +} + +// generateInjectedScript generates a shell script based on +// SIF definition file %environment and %runscript data, and returns it. +func generateInjectedScript(environment []string, runscript []string) []byte { + script := fmt.Sprintf("#!/bin/bash\n"+ + "%s\n"+ + "%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n")) + return []byte(script) +} + +// processDefFile finds sif.DataDeffile in sifImage, if any, +// and returns: +// - the command to run +// - contents of a script to inject as injectedScriptTargetPath, or nil +func processDefFile(sifImage *sif.FileImage) (string, []byte, error) { + var environment, runscript []string + + desc, err := sifImage.GetDescriptor(sif.WithDataType(sif.DataDeffile)) + if err == nil { + environment, runscript, err = parseDefFile(desc.GetReader()) + if err != nil { + return "", nil, err + } + } + + var command string + var injectedScript []byte + if len(environment) == 0 && len(runscript) == 0 { + command = "bash" + injectedScript = nil + } else { + injectedScript = generateInjectedScript(environment, runscript) + command = injectedScriptTargetPath + } + + return command, injectedScript, nil +} + +func writeInjectedScript(extractedRootPath string, injectedScript []byte) error { + if injectedScript == nil { + return nil + } + filePath := filepath.Join(extractedRootPath, injectedScriptTargetPath) + parentDirPath := filepath.Dir(filePath) + if err := os.MkdirAll(parentDirPath, 0755); err != nil { + return fmt.Errorf("creating %s: %w", parentDirPath, err) + } + if err := os.WriteFile(filePath, injectedScript, 0755); err != nil { + return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err) + } + return nil +} + +// createTarFromSIFInputs creates a tar file at tarPath, using a squashfs image at squashFSPath. +// It can also use extractedRootPath and scriptPath, which are allocated for its exclusive use, +// if necessary. +func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, injectedScript []byte, extractedRootPath, scriptPath string) error { + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + defer os.RemoveAll(extractedRootPath) + + // Almost everything in extractedRootPath comes from squashFSPath. 
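+	// Editor's note: the conversion below runs in two steps inside a single
+	// fakeroot session — unsquashfs extracts the SIF's root filesystem into
+	// extractedRootPath, then tar repacks it (preserving ACLs and xattrs)
+	// into tarPath.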
+ conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./", + extractedRootPath, squashFSPath, extractedRootPath, tarPath) + script := "#!/bin/sh\n" + conversionCommand + "\n" + if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil { + return err + } + defer os.Remove(scriptPath) + + // On top of squashFSPath, we only add injectedScript, if necessary. + if err := writeInjectedScript(extractedRootPath, injectedScript); err != nil { + return err + } + + logrus.Debugf("Converting squashfs to tar, command: %s ...", conversionCommand) + cmd := exec.CommandContext(ctx, "fakeroot", "--", scriptPath) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("converting image: %w, output: %s", err, string(output)) + } + logrus.Debugf("... finished converting squashfs to tar") + return nil +} + +// convertSIFToElements processes sifImage and creates/returns +// the relevant elements for constructing an OCI-like image: +// - A path to a tar file containing a root filesystem, +// - A command to run. +// The returned tar file path is inside tempDir, which can be assumed to be empty +// at start, and is exclusively used by the current process (i.e. it is safe +// to use hard-coded relative paths within it). +func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) { + // We could allocate unique names for all of these using os.{CreateTemp,MkdirTemp}, but tempDir is exclusive, + // so we can just hard-code a set of unique values here. + // We create and/or manage cleanup of these two paths. + squashFSPath := filepath.Join(tempDir, "rootfs.squashfs") + tarPath := filepath.Join(tempDir, "rootfs.tar") + // We only allocate these paths, the user is responsible for cleaning them up. + extractedRootPath := filepath.Join(tempDir, "rootfs") + scriptPath := filepath.Join(tempDir, "script") + + succeeded := false + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + // Ideally we would remove squashFSPath immediately after creating extractedRootPath, but we need + // to run both creation and consumption of extractedRootPath in the same fakeroot context. + // So, overall, this process requires at least 2 compressed copies (SIF and squashFSPath) and 2 + // uncompressed copies (extractedRootPath and tarPath) of the data, all using up space at the same time. + // That's rather unsatisfactory, ideally we would be streaming the data directly from a squashfs parser + // reading from the SIF file to a tarball, for 1 compressed and 1 uncompressed copy. + defer os.Remove(squashFSPath) + defer func() { + if !succeeded { + os.Remove(tarPath) + } + }() + + command, injectedScript, err := processDefFile(sifImage) + if err != nil { + return "", nil, err + } + + rootFS, err := sifImage.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys)) + if err != nil { + return "", nil, fmt.Errorf("looking up rootfs from SIF file: %w", err) + } + // TODO: We'd prefer not to make a full copy of the file here; unsquashfs ≥ 4.4 + // has an -o option that allows extracting a squashfs from the SIF file directly, + // but that version is not currently available in RHEL 8. 
+    logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath)
+    if err := func() error { // A scope for defer
+        f, err := os.Create(squashFSPath)
+        if err != nil {
+            return err
+        }
+        defer f.Close()
+        // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+        if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil {
+            return err
+        }
+        return nil
+    }(); err != nil {
+        return "", nil, err
+    }
+    logrus.Debugf("... finished creating a temporary squashfs image")
+
+    if err := createTarFromSIFInputs(ctx, tarPath, squashFSPath, injectedScript, extractedRootPath, scriptPath); err != nil {
+        return "", nil, err
+    }
+    succeeded = true
+    return tarPath, []string{command}, nil
+}
diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go
new file mode 100644
index 00000000000..ccf1259660e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/src.go
@@ -0,0 +1,216 @@
+package sif
+
+import (
+    "bytes"
+    "context"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "io"
+    "os"
+
+    "github.com/containers/image/v5/internal/tmpdir"
+    "github.com/containers/image/v5/types"
+    "github.com/opencontainers/go-digest"
+    imgspecs "github.com/opencontainers/image-spec/specs-go"
+    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+    "github.com/sirupsen/logrus"
+    "github.com/sylabs/sif/v2/pkg/sif"
+)
+
+type sifImageSource struct {
+    ref          sifReference
+    workDir      string
+    layerDigest  digest.Digest
+    layerSize    int64
+    layerFile    string
+    config       []byte
+    configDigest digest.Digest
+    manifest     []byte
+}
+
+// getBlobInfo returns the digest and size of the provided file.
+func getBlobInfo(path string) (digest.Digest, int64, error) {
+    f, err := os.Open(path)
+    if err != nil {
+        return "", -1, fmt.Errorf("opening %q for reading: %w", path, err)
+    }
+    defer f.Close()
+
+    // TODO: Instead of writing the tar file to disk, and reading
+    // it here again, stream the tar file to a pipe and
+    // compute the digest while writing it to disk.
+    logrus.Debugf("Computing a digest of the SIF conversion output...")
+    digester := digest.Canonical.Digester()
+    // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+    size, err := io.Copy(digester.Hash(), f)
+    if err != nil {
+        return "", -1, fmt.Errorf("reading %q: %w", path, err)
+    }
+    digest := digester.Digest()
+    logrus.Debugf("... finished computing the digest of the SIF conversion output")
+
+    return digest, size, nil
+}
+
+// newImageSource returns an ImageSource for reading from a SIF image file.
+// newImageSource extracts SIF objects and saves them in a temp directory.
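+// The returned source synthesizes an OCI-style image on the fly: it serves
+// exactly one config blob and one layer blob (the converted tarball), both
+// generated below rather than fetched from a registry.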
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (types.ImageSource, error) { + sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY)) + if err != nil { + return nil, fmt.Errorf("loading SIF file: %w", err) + } + defer func() { + _ = sifImg.UnloadContainer() + }() + + workDir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif") + if err != nil { + return nil, fmt.Errorf("creating temp directory: %w", err) + } + succeeded := false + defer func() { + if !succeeded { + os.RemoveAll(workDir) + } + }() + + layerPath, commandLine, err := convertSIFToElements(ctx, sifImg, workDir) + if err != nil { + return nil, fmt.Errorf("converting rootfs from SquashFS to Tarball: %w", err) + } + + layerDigest, layerSize, err := getBlobInfo(layerPath) + if err != nil { + return nil, fmt.Errorf("gathering blob information: %w", err) + } + + created := sifImg.ModifiedAt() + config := imgspecv1.Image{ + Created: &created, + Architecture: sifImg.PrimaryArch(), + OS: "linux", + Config: imgspecv1.ImageConfig{ + Cmd: commandLine, + }, + RootFS: imgspecv1.RootFS{ + Type: "layers", + DiffIDs: []digest.Digest{layerDigest}, + }, + History: []imgspecv1.History{ + { + Created: &created, + CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator), + Comment: "imported from SIF, uuid: " + sifImg.ID(), + }, + { + Created: &created, + CreatedBy: "/bin/sh -c #(nop) CMD [\"bash\"]", + EmptyLayer: true, + }, + }, + } + configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("generating configuration blob for %q: %w", ref.resolvedFile, err) + } + configDigest := digest.Canonical.FromBytes(configBytes) + + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageManifest, + Config: imgspecv1.Descriptor{ + Digest: configDigest, + Size: int64(len(configBytes)), + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: []imgspecv1.Descriptor{{ + Digest: layerDigest, + Size: layerSize, + MediaType: imgspecv1.MediaTypeImageLayer, + }}, + } + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("generating manifest for %q: %w", ref.resolvedFile, err) + } + + succeeded = true + return &sifImageSource{ + ref: ref, + workDir: workDir, + layerDigest: layerDigest, + layerSize: layerSize, + layerFile: layerPath, + config: configBytes, + configDigest: configDigest, + manifest: manifestBytes, + }, nil +} + +// Reference returns the reference used to set up this source. +func (s *sifImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *sifImageSource) Close() error { + return os.RemoveAll(s.workDir) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *sifImageSource) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
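One detail worth noting in the config and manifest built above: because the converted layer is an uncompressed tar, its blob digest and its DiffID coincide, which is why the same layerDigest legitimately appears both under RootFS.DiffIDs and in the manifest's Layers descriptor. A toy illustration (the bytes are hypothetical):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	layer := []byte("pretend this is an uncompressed tar stream") // hypothetical layer contents
	blobDigest := digest.Canonical.FromBytes(layer)               // what GetBlob serves
	diffID := digest.Canonical.FromBytes(layer)                   // what the applied rootfs diff hashes to
	fmt.Println(blobDigest == diffID)                             // true: there is no compression step in between
}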
+func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + switch info.Digest { + case s.configDigest: + return io.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil + case s.layerDigest: + reader, err := os.Open(s.layerFile) + if err != nil { + return nil, -1, fmt.Errorf("opening %q: %w", s.layerFile, err) + } + return reader, s.layerSize, nil + default: + return nil, -1, fmt.Errorf("no blob with digest %q found", info.Digest.String()) + } +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.New("manifest lists are not supported by the sif transport") + } + return s.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *sifImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.New("manifest lists are not supported by the sif transport") + } + return nil, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *sifImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go new file mode 100644 index 00000000000..18d894bc35c --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/transport.go @@ -0,0 +1,164 @@ +package sif + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for SIF images. 
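The init() registration above is what makes the transport reachable by name. A hypothetical caller sketch, assuming the sif package is linked into the binary (the /tmp/app.sif path is made up; parsing only resolves the parent directory, so the file need not exist):

package main

import (
	"fmt"

	_ "github.com/containers/image/v5/sif" // ensure init() above has run and registered "sif"
	"github.com/containers/image/v5/transports/alltransports"
)

func main() {
	ref, err := alltransports.ParseImageName("sif:/tmp/app.sif") // hypothetical path
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(ref.Transport().Name())            // "sif"
	fmt.Println(ref.PolicyConfigurationIdentity()) // fully resolved absolute path
}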
+var Transport = sifTransport{} + +type sifTransport struct{} + +func (t sifTransport) Name() string { + return "sif" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t sifTransport) ParseReference(reference string) (types.ImageReference, error) { + return NewReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t sifTransport) ValidatePolicyConfigurationScope(scope string) error { + if !strings.HasPrefix(scope, "/") { + return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope) + } + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + cleaned := filepath.Clean(scope) + if cleaned != scope { + return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + } + return nil +} + +// sifReference is an ImageReference for SIF images. +type sifReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedFile may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "file" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + file string // As specified by the user. May be relative, contain symlinks, etc. + resolvedFile string // Absolute file path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. +} + +// There is no sif.ParseReference because it is rather pointless. +// Callers who need a transport-independent interface will go through +// sifTransport.ParseReference; callers who intentionally deal with SIF files +// can use sif.NewReference. + +// NewReference returns an image file reference for a specified path. +func NewReference(file string) (types.ImageReference, error) { + // We do not expose an API supplying the resolvedFile; we could, but recomputing it + // is generally cheap enough that we prefer being confident about the properties of resolvedFile. + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) + if err != nil { + return nil, err + } + return sifReference{file: file, resolvedFile: resolved}, nil +} + +func (ref sifReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. 
default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; +// instead, see transports.ImageName(). +func (ref sifReference) StringWithinTransport() string { + return ref.file +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref sifReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref sifReference) PolicyConfigurationIdentity() string { + return ref.resolvedFile +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref sifReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedFile + for { + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 || lastSlash == 0 { + break + } + path = path[:lastSlash] + res = append(res, path) + } + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by sifTransport.ValidatePolicyConfigurationScope above. + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref sifReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. 
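PolicyConfigurationNamespaces above yields progressively shorter parent paths, stopping before the bare "/". Extracting just that loop makes the ordering easy to see (the .sif path is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// namespaces mirrors the loop in sifReference.PolicyConfigurationNamespaces above.
func namespaces(resolvedFile string) []string {
	res := []string{}
	path := resolvedFile
	for {
		lastSlash := strings.LastIndex(path, "/")
		if lastSlash == -1 || lastSlash == 0 {
			break
		}
		path = path[:lastSlash]
		res = append(res, path)
	}
	return res
}

func main() {
	fmt.Println(namespaces("/var/lib/sifs/app.sif"))
	// Output: [/var/lib/sifs /var/lib /var]
}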
+func (ref sifReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return nil, errors.New(`"sif:" locations can only be read from, not written to`) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref sifReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.New("Deleting images not implemented for sif: images") +} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go index 961246147a3..249b5a1fe46 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism.go @@ -6,7 +6,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "strings" // This code is used only to parse the data in an explicitly-untrusted @@ -82,7 +82,7 @@ func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents if !md.IsSigned { return nil, "", errors.New("The input is not a signature") } - content, err := ioutil.ReadAll(md.UnverifiedBody) + content, err := io.ReadAll(md.UnverifiedBody) if err != nil { // Coverage: An error during reading the body can happen only if // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go index c166fb32d89..4c7968417ed 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go @@ -7,7 +7,6 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" "os" "github.com/proglottis/gpgme" @@ -37,7 +36,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWith // of these keys. // The caller must call .Close() on the returned SigningMechanism. 
func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) { - dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") + dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-") if err != nil { return nil, nil, err } diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go index ef4e70e7f2e..63cb7788bbb 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go @@ -7,7 +7,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "os" "path" "strings" @@ -44,7 +44,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWith } } - pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg")) + pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg")) if err != nil { if !os.IsNotExist(err) { return nil, err @@ -130,7 +130,7 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [ if !md.IsSigned { return nil, "", errors.New("not signed") } - content, err := ioutil.ReadAll(md.UnverifiedBody) + content, err := io.ReadAll(md.UnverifiedBody) if err != nil { // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted // (and possibly also signed, but it _must_ be encrypted) and the signing diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go index 82fbb68cb14..bb91cae8c16 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -16,7 +16,6 @@ package signature import ( "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -80,7 +79,7 @@ func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) stri // NewPolicyFromFile returns a policy configured in the specified file. 
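The io/ioutil churn in these signature files is the mechanical migration to the Go 1.16+ standard-library replacements; the full mapping used across this diff, in one runnable snippet:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.ReadAll(r)        -> io.ReadAll(r)
	data, _ := io.ReadAll(strings.NewReader("payload"))
	// ioutil.ReadFile(p)       -> os.ReadFile(p)
	_, err := os.ReadFile("/nonexistent") // hypothetical path, expected to fail
	// ioutil.WriteFile(p,b,m)  -> os.WriteFile(p, b, m)
	// ioutil.TempDir(d, p)     -> os.MkdirTemp(d, p)
	dir, _ := os.MkdirTemp("", "example-")
	defer os.RemoveAll(dir)
	fmt.Println(len(data), os.IsNotExist(err), dir != "")
}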
func NewPolicyFromFile(fileName string) (*Policy, error) { - contents, err := ioutil.ReadFile(fileName) + contents, err := os.ReadFile(fileName) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go index 26cca4759e0..65e8259732b 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -5,7 +5,7 @@ package signature import ( "context" "fmt" - "io/ioutil" + "os" "strings" "github.com/containers/image/v5/manifest" @@ -33,7 +33,7 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types if pr.KeyData != nil { data = pr.KeyData } else { - d, err := ioutil.ReadFile(pr.KeyPath) + d, err := os.ReadFile(pr.KeyPath) if err != nil { return sarRejected, nil, err } diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go new file mode 100644 index 00000000000..8071e3b32fa --- /dev/null +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -0,0 +1,1356 @@ +//go:build !containers_image_storage_stub +// +build !containers_image_storage_stub + +package storage + +import ( + "bytes" + "context" + "encoding/json" + stderrors "errors" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chunked" + "github.com/containers/storage/pkg/ioutils" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + // ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob + // with a digest-based name that doesn't match its contents. + // Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value), + // and there is no known user of this error. + ErrBlobDigestMismatch = stderrors.New("blob digest mismatch") + // ErrBlobSizeMismatch is returned when PutBlob() is given a blob + // with an expected size that doesn't match the reader. + ErrBlobSizeMismatch = stderrors.New("blob size mismatch") + // ErrNoSuchImage is returned when we attempt to access an image which + // doesn't exist in the storage area. 
+ ErrNoSuchImage = storage.ErrNotAnImage +) + +type storageImageSource struct { + imageRef storageReference + image *storage.Image + systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files + layerPosition map[digest.Digest]int // Where we are in reading a blob's layers + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice +} + +type storageImageDestination struct { + imageRef storageReference + directory string // Temporary directory where we store blobs until Commit() time + nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs + manifest []byte // Manifest contents, temporary + manifestDigest digest.Digest // Valid if len(manifest) != 0 + signatures []byte // Signature contents, temporary + signatureses map[digest.Digest][]byte // Instance signature contents, temporary + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice + + // A storage destination may be used concurrently. Accesses are + // serialized via a mutex. Please refer to the individual comments + // below for details. + lock sync.Mutex + // Mapping from layer (by index) to the associated ID in the storage. + // It's protected *implicitly* since `commitLayer()`, at any given + // time, can only be executed by *one* goroutine. Please refer to + // `queueOrCommit()` for further details on how the single-caller + // guarantee is implemented. + indexToStorageID map[int]*string + // All accesses to below data are protected by `lock` which is made + // *explicit* in the code. + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes + filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them + currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) + indexToPulledLayerInfo map[int]*manifest.LayerInfo // Mapping from layer (by index) to pulled down blob + blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer + diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output +} + +type storageImageCloser struct { + types.ImageCloser + size int64 +} + +// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions. 
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
+// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey.
+func manifestBigDataKey(digest digest.Digest) string {
+	return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
+}
+
+// signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably.
+func signatureBigDataKey(digest digest.Digest) string {
+	return "signature-" + digest.Encoded()
+}
+
+// newImageSource sets up an image for reading.
+func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
+	// First, locate the image.
+	img, err := imageRef.resolveImage(sys)
+	if err != nil {
+		return nil, err
+	}
+
+	// Build the reader object.
+	image := &storageImageSource{
+		imageRef:        imageRef,
+		systemContext:   sys,
+		image:           img,
+		layerPosition:   make(map[digest.Digest]int),
+		SignatureSizes:  []int{},
+		SignaturesSizes: make(map[digest.Digest][]int),
+	}
+	if img.Metadata != "" {
+		if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
+			return nil, errors.Wrap(err, "decoding metadata for source image")
+		}
+	}
+	return image, nil
+}
+
+// Reference returns the image reference that we used to find this image.
+func (s *storageImageSource) Reference() types.ImageReference {
+	return s.imageRef
+}
+
+// Close cleans up any resources we tied up while reading the image.
+func (s *storageImageSource) Close() error {
+	return nil
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *storageImageSource) HasThreadSafeGetBlob() bool {
+	return true
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
+	if info.Digest == image.GzippedEmptyLayerDigest {
+		return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
+	}
+
+	// NOTE: the blob is first written to a temporary file and subsequently
+	// closed. The intention is to keep the time we own the storage lock
+	// as short as possible to allow other processes to access the storage.
+	rc, n, _, err = s.getBlobAndLayerID(info)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer rc.Close()
+
+	tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
+	if err != nil {
+		return nil, 0, err
+	}
+
+	if _, err := io.Copy(tmpFile, rc); err != nil {
+		return nil, 0, err
+	}
+
+	if _, err := tmpFile.Seek(0, 0); err != nil {
+		return nil, 0, err
+	}
+
+	wrapper := ioutils.NewReadCloserWrapper(tmpFile, func() error {
+		defer os.Remove(tmpFile.Name())
+		return tmpFile.Close()
+	})
+
+	return wrapper, n, err
+}
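The temp-file handoff in GetBlob above (copy under the storage lock, then serve the data from a self-deleting file) is a small pattern worth seeing in isolation; a standard-library-only sketch of a ReadCloser that removes its backing file on Close, similar to what ioutils.NewReadCloserWrapper is used for here:

package main

import (
	"fmt"
	"io"
	"os"
)

type tmpFileReadCloser struct {
	*os.File
}

func (t tmpFileReadCloser) Close() error {
	defer os.Remove(t.Name()) // delete the backing file once the consumer is done
	return t.File.Close()
}

func main() {
	f, err := os.CreateTemp("", "blob-")
	if err != nil {
		panic(err)
	}
	f.WriteString("blob bytes")
	f.Seek(0, 0)
	var rc io.ReadCloser = tmpFileReadCloser{f}
	data, _ := io.ReadAll(rc)
	rc.Close() // closes and removes the temporary file
	fmt.Printf("%s\n", data)
}

+
+// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.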
+func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { + var layer storage.Layer + var diffOptions *storage.DiffOptions + // We need a valid digest value. + err = info.Digest.Validate() + if err != nil { + return nil, -1, "", err + } + // Check if the blob corresponds to a diff that was used to initialize any layers. Our + // callers should try to retrieve layers using their uncompressed digests, so no need to + // check if they're using one of the compressed digests, which we can't reproduce anyway. + layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest) + + // If it's not a layer, then it must be a data item. + if len(layers) == 0 { + b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String()) + if err != nil { + return nil, -1, "", err + } + r := bytes.NewReader(b) + logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) + return io.NopCloser(r), int64(r.Len()), "", nil + } + // Step through the list of matching layers. Tests may want to verify that if we have multiple layers + // which claim to have the same contents, that we actually do have multiple layers, otherwise we could + // just go ahead and use the first one every time. + s.getBlobMutex.Lock() + i := s.layerPosition[info.Digest] + s.layerPosition[info.Digest] = i + 1 + s.getBlobMutex.Unlock() + if len(layers) > 0 { + layer = layers[i%len(layers)] + } + // Force the storage layer to not try to match any compression that was used when the layer was first + // handed to it. + noCompression := archive.Uncompressed + diffOptions = &storage.DiffOptions{ + Compression: &noCompression, + } + if layer.UncompressedSize < 0 { + n = -1 + } else { + n = layer.UncompressedSize + } + logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest) + rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions) + if err != nil { + return nil, -1, "", err + } + return rc, n, layer.ID, err +} + +// GetManifest() reads the image's manifest. +func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) { + if instanceDigest != nil { + key := manifestBigDataKey(*instanceDigest) + blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) + if err != nil { + return nil, "", errors.Wrapf(err, "reading manifest for image instance %q", *instanceDigest) + } + return blob, manifest.GuessMIMEType(blob), err + } + if len(s.cachedManifest) == 0 { + // The manifest is stored as a big data item. + // Prefer the manifest corresponding to the user-specified digest, if available. + if s.imageRef.named != nil { + if digested, ok := s.imageRef.named.(reference.Digested); ok { + key := manifestBigDataKey(digested.Digest()) + blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) + if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key + return nil, "", err + } + if err == nil { + s.cachedManifest = blob + } + } + } + // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest. + // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest(). 
+ if len(s.cachedManifest) == 0 { + cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey) + if err != nil { + return nil, "", err + } + s.cachedManifest = cachedBlob + } + } + return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err +} + +// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of +// the image, after they've been decompressed. +func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + manifestBlob, manifestType, err := s.GetManifest(ctx, instanceDigest) + if err != nil { + return nil, errors.Wrapf(err, "reading image manifest for %q", s.image.ID) + } + if manifest.MIMETypeIsMultiImage(manifestType) { + return nil, errors.Errorf("can't copy layers for a manifest list (shouldn't be attempted)") + } + man, err := manifest.FromBlob(manifestBlob, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "parsing image manifest for %q", s.image.ID) + } + + uncompressedLayerType := "" + switch manifestType { + case imgspecv1.MediaTypeImageManifest: + uncompressedLayerType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed + } + + physicalBlobInfos := []types.BlobInfo{} + layerID := s.image.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return nil, errors.Wrapf(err, "reading layer %q in image %q", layerID, s.image.ID) + } + if layer.UncompressedDigest == "" { + return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID) + } + if layer.UncompressedSize < 0 { + return nil, errors.Errorf("uncompressed size for layer %q is unknown", layerID) + } + blobInfo := types.BlobInfo{ + Digest: layer.UncompressedDigest, + Size: layer.UncompressedSize, + MediaType: uncompressedLayerType, + } + physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) + layerID = layer.Parent + } + + res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos) + if err != nil { + return nil, errors.Wrapf(err, "creating LayerInfosForCopy of image %q", s.image.ID) + } + return res, nil +} + +// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest, +// but using layer data which we can actually produce — physicalInfos for non-empty layers, +// and image.GzippedEmptyLayer for empty ones. +// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.) 
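The walk in LayerInfosForCopy above follows Parent links downwards from TopLayer while prepending, so the result comes out base-first; the ordering trick in miniature (layer names and the chain are hypothetical):

package main

import "fmt"

func main() {
	parent := map[string]string{"c": "b", "b": "a", "a": ""} // hypothetical chain: c -> b -> a
	ordered := []string{}
	for id := "c"; id != ""; id = parent[id] {
		ordered = append([]string{id}, ordered...) // prepend, like physicalBlobInfos above
	}
	fmt.Println(ordered) // [a b c]: base layer first
}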
+func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
+	nextPhysical := 0
+	res := make([]types.BlobInfo, len(manifestInfos))
+	for i, mi := range manifestInfos {
+		if mi.EmptyLayer {
+			res[i] = types.BlobInfo{
+				Digest:    image.GzippedEmptyLayerDigest,
+				Size:      int64(len(image.GzippedEmptyLayer)),
+				MediaType: mi.MediaType,
+			}
+		} else {
+			if nextPhysical >= len(physicalInfos) {
+				return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
+			}
+			res[i] = physicalInfos[nextPhysical]
+			nextPhysical++
+		}
+	}
+	if nextPhysical != len(physicalInfos) {
+		return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
+	}
+	return res, nil
+}
+
+// GetSignatures() parses the image's signatures blob into a slice of byte slices.
+func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
+	var offset int
+	sigslice := [][]byte{}
+	signature := []byte{}
+	signatureSizes := s.SignatureSizes
+	key := "signatures"
+	instance := "default instance"
+	if instanceDigest != nil {
+		signatureSizes = s.SignaturesSizes[*instanceDigest]
+		key = signatureBigDataKey(*instanceDigest)
+		instance = instanceDigest.Encoded()
+	}
+	if len(signatureSizes) > 0 {
+		signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+		if err != nil {
+			return nil, errors.Wrapf(err, "looking up signatures data for image %q (%s)", s.image.ID, instance)
+		}
+		signature = signatureBlob
+	}
+	for _, length := range signatureSizes {
+		if offset+length > len(signature) {
+			// err is always nil at this point, so build the error directly instead of wrapping it.
+			return nil, errors.Errorf("looking up signatures data for image %q (%s): expected at least %d bytes, only found %d", s.image.ID, instance, offset+length, len(signature))
+		}
+		sigslice = append(sigslice, signature[offset:offset+length])
+		offset += length
+	}
+	if offset != len(signature) {
+		return nil, errors.Errorf("signatures data (%s) contained %d extra bytes", instance, len(signature)-offset)
+	}
+	return sigslice, nil
+}
+
+// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
+// it's time to Commit() the image.
+func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
+	directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
+	if err != nil {
+		return nil, errors.Wrapf(err, "creating a temporary directory")
+	}
+	image := &storageImageDestination{
+		imageRef:               imageRef,
+		directory:              directory,
+		signatureses:           make(map[digest.Digest][]byte),
+		blobDiffIDs:            make(map[digest.Digest]digest.Digest),
+		blobAdditionalLayer:    make(map[digest.Digest]storage.AdditionalLayer),
+		fileSizes:              make(map[digest.Digest]int64),
+		filenames:              make(map[digest.Digest]string),
+		SignatureSizes:         []int{},
+		SignaturesSizes:        make(map[digest.Digest][]int),
+		indexToStorageID:       make(map[int]*string),
+		indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo),
+		diffOutputs:            make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
+	}
+	return image, nil
+}
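GetSignatures above reads one concatenated blob and relies purely on the recorded sizes for framing; the slicing loop reduced to its essentials (the bytes and sizes are hypothetical):

package main

import "fmt"

func main() {
	blob := []byte("aaabbcccc") // hypothetical concatenated signatures
	sizes := []int{3, 2, 4}     // what SignatureSizes records alongside them
	offset := 0
	sigs := [][]byte{}
	for _, n := range sizes {
		sigs = append(sigs, blob[offset:offset+n])
		offset += n
	}
	fmt.Printf("%q\n", sigs) // ["aaa" "bb" "cccc"]
}

+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.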
+func (s *storageImageDestination) Reference() types.ImageReference {
+	return s.imageRef
+}
+
+// Close cleans up the temporary directory and additional layer store handlers.
+func (s *storageImageDestination) Close() error {
+	for _, al := range s.blobAdditionalLayer {
+		al.Release()
+	}
+	for _, v := range s.diffOutputs {
+		if v.Target != "" {
+			_ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target)
+		}
+	}
+	return os.RemoveAll(s.directory)
+}
+
+func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
+	// We ultimately have to decompress layers to populate trees on disk
+	// and need to explicitly ask for it here, so that the layers' MIME
+	// types can be set accordingly.
+	return types.PreserveOriginal
+}
+
+func (s *storageImageDestination) computeNextBlobCacheFile() string {
+	return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
+}
+
+// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+func (s *storageImageDestination) HasThreadSafePutBlob() bool {
+	return true
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+	info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options)
+	if err != nil {
+		return info, err
+	}
+
+	if options.IsConfig || options.LayerIndex == nil {
+		return info, nil
+	}
+
+	return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
+}
+
+// PutBlob writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// May update cache.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	return s.PutBlobWithOptions(ctx, stream, blobinfo, private.PutBlobOptions{
+		Cache:    cache,
+		IsConfig: isConfig,
+	})
+}
+
+// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
+// The caller must arrange the blob to be eventually committed using s.commitLayer().
+func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) { + // Stores a layer or data blob in our temporary directory, checking that any information + // in the blobinfo matches the incoming data. + errorBlobInfo := types.BlobInfo{ + Digest: "", + Size: -1, + } + if blobinfo.Digest != "" { + if err := blobinfo.Digest.Validate(); err != nil { + return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err) + } + } + + // Set up to digest the blob if necessary, and count its size while saving it to a file. + filename := s.computeNextBlobCacheFile() + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "creating temporary file %q", filename) + } + defer file.Close() + counter := ioutils.NewWriteCounter(file) + stream = io.TeeReader(stream, counter) + digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo) + decompressed, err := archive.DecompressStream(stream) + if err != nil { + return errorBlobInfo, errors.Wrap(err, "setting up to decompress blob") + } + + diffID := digest.Canonical.Digester() + // Copy the data to the file. + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + _, err = io.Copy(diffID.Hash(), decompressed) + decompressed.Close() + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "storing blob to file %q", filename) + } + + // Determine blob properties, and fail if information that we were given about the blob + // is known to be incorrect. + blobDigest := digester.Digest() + blobSize := blobinfo.Size + if blobSize < 0 { + blobSize = counter.Count + } else if blobinfo.Size != counter.Count { + return errorBlobInfo, errors.WithStack(ErrBlobSizeMismatch) + } + + // Record information about the blob. + s.lock.Lock() + s.blobDiffIDs[blobDigest] = diffID.Digest() + s.fileSizes[blobDigest] = counter.Count + s.filenames[blobDigest] = filename + s.lock.Unlock() + // This is safe because we have just computed diffID, and blobDigest was either computed + // by us, or validated by the caller (usually copy.digestingReader). + options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) + return types.BlobInfo{ + Digest: blobDigest, + Size: blobSize, + MediaType: blobinfo.MediaType, + }, nil +} + +type zstdFetcher struct { + chunkAccessor private.BlobChunkAccessor + ctx context.Context + blobInfo types.BlobInfo +} + +// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt. +func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + var newChunks []private.ImageSourceChunk + for _, v := range chunks { + i := private.ImageSourceChunk{ + Offset: v.Offset, + Length: v.Length, + } + newChunks = append(newChunks, i) + } + rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks) + if _, ok := err.(private.BadPartialRequestError); ok { + err = chunked.ErrBadRequest{} + } + return rc, errs, err + +} + +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). +// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. 
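putBlobToPendingFile above threads one incoming stream through several consumers at once: a byte counter, a digester for the (possibly compressed) blob, and a digester for the decompressed contents that becomes the DiffID. A stripped-down, standard-library-only sketch of that plumbing, with gzip standing in for archive.DecompressStream:

package main

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
)

func main() {
	// Build a small gzipped payload standing in for an incoming layer blob.
	var compressed bytes.Buffer
	zw := gzip.NewWriter(&compressed)
	zw.Write([]byte("layer contents"))
	zw.Close()

	blobHash := sha256.New()
	stream := io.TeeReader(&compressed, blobHash) // digest the compressed bytes as they stream past
	zr, err := gzip.NewReader(stream)
	if err != nil {
		panic(err)
	}
	diffID := sha256.New()
	n, err := io.Copy(diffID, zr) // digest the decompressed bytes: this is the DiffID
	if err != nil {
		panic(err)
	}
	fmt.Printf("blob   sha256:%x\n", blobHash.Sum(nil))
	fmt.Printf("diffID sha256:%x (%d uncompressed bytes)\n", diffID.Sum(nil), n)
}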
+func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
+	fetcher := zstdFetcher{
+		chunkAccessor: chunkAccessor,
+		ctx:           ctx,
+		blobInfo:      srcInfo,
+	}
+
+	differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
+	if err != nil {
+		return srcInfo, err
+	}
+
+	out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
+	if err != nil {
+		return srcInfo, err
+	}
+
+	blobDigest := srcInfo.Digest
+
+	s.lock.Lock()
+	s.blobDiffIDs[blobDigest] = blobDigest
+	s.fileSizes[blobDigest] = 0
+	s.filenames[blobDigest] = ""
+	s.diffOutputs[blobDigest] = out
+	s.lock.Unlock()
+
+	return srcInfo, nil
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+	reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options)
+	if err != nil || !reused || options.LayerIndex == nil {
+		return reused, info, err
+	}
+
+	return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
+}
+
+// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+// May use and/or update cache.
+func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
+	return s.TryReusingBlobWithOptions(ctx, blobinfo, private.TryReusingBlobOptions{
+		Cache:         cache,
+		CanSubstitute: canSubstitute,
+	})
+}
+
+// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
+// The caller must arrange the blob to be eventually committed using s.commitLayer().
+func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { + // lock the entire method as it executes fairly quickly + s.lock.Lock() + defer s.lock.Unlock() + + if options.SrcRef != nil { + // Check if we have the layer in the underlying additional layer store. + aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String()) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest) + } else if err == nil { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest() + s.blobAdditionalLayer[blobinfo.Digest] = aLayer + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: aLayer.CompressedSize(), + MediaType: blobinfo.MediaType, + }, nil + } + } + + if blobinfo.Digest == "" { + return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) + } + if err := blobinfo.Digest.Validate(); err != nil { + return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`) + } + + // Check if we've already cached it in a file. + if size, ok := s.fileSizes[blobinfo.Digest]; ok { + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: size, + MediaType: blobinfo.MediaType, + }, nil + } + + // Check if we have a wasn't-compressed layer in storage that's based on that blob. + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Save this for completeness. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: layers[0].UncompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + + // Check if we have a was-compressed layer in storage that's based on that blob. + layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: layers[0].CompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + + // Does the blob correspond to a known DiffID which we already have available? + // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the + // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size. 
+ if options.CanSubstitute || blobinfo.Size != -1 { + if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, uncompressedDigest) + } + if len(layers) > 0 { + if blobinfo.Size != -1 { + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, blobinfo, nil + } + if !options.CanSubstitute { + return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo) + } + s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest + return true, types.BlobInfo{ + Digest: uncompressedDigest, + Size: layers[0].UncompressedSize, + MediaType: blobinfo.MediaType, + }, nil + } + } + } + + // Nope, we don't have it. + return false, types.BlobInfo{}, nil +} + +// computeID computes a recommended image ID based on information we have so far. If +// the manifest is not of a type that we recognize, we return an empty value, indicating +// that since we don't have a recommendation, a random ID should be used if one needs +// to be allocated. +func (s *storageImageDestination) computeID(m manifest.Manifest) string { + // Build the diffID list. We need the decompressed sums that we've been calculating to + // fill in the DiffIDs. It's expected (but not enforced by us) that the number of + // diffIDs corresponds to the number of non-EmptyLayer entries in the history. + var diffIDs []digest.Digest + switch m := m.(type) { + case *manifest.Schema1: + // Build a list of the diffIDs we've generated for the non-throwaway FS layers, + // in reverse of the order in which they were originally listed. + for i, compat := range m.ExtractedV1Compatibility { + if compat.ThrowAway { + continue + } + blobSum := m.FSLayers[i].BlobSum + diffID, ok := s.blobDiffIDs[blobSum] + if !ok { + logrus.Infof("error looking up diffID for layer %q", blobSum.String()) + return "" + } + diffIDs = append([]digest.Digest{diffID}, diffIDs...) + } + case *manifest.Schema2, *manifest.OCI1: + // We know the ID calculation for these formats doesn't actually use the diffIDs, + // so we don't need to populate the diffID list. + default: + return "" + } + id, err := m.ImageID(diffIDs) + if err != nil { + return "" + } + return id +} + +// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig +// information out of it for Inspect(). +func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { + if info.Digest == "" { + return nil, errors.Errorf(`no digest supplied when reading blob`) + } + if err := info.Digest.Validate(); err != nil { + return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) + } + // Assume it's a file, since we're only calling this from a place that expects to read files. + if filename, ok := s.filenames[info.Digest]; ok { + contents, err2 := os.ReadFile(filename) + if err2 != nil { + return nil, errors.Wrapf(err2, `reading blob from file %q`, filename) + } + return contents, nil + } + // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. + return nil, errors.New("blob not found") +} + +// queueOrCommit queues in the specified blob to be committed to the storage. 
+// If no other goroutine is already committing layers, the layer and all +// subsequent layers (if already queued) will be committed to the storage. +func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int, emptyLayer bool) error { + // NOTE: whenever the code below is touched, make sure that all code + // paths unlock the lock and to unlock it exactly once. + // + // Conceptually, the code is divided in two stages: + // + // 1) Queue in work by marking the layer as ready to be committed. + // If at least one previous/parent layer with a lower index has + // not yet been committed, return early. + // + // 2) Process the queued-in work by committing the "ready" layers + // in sequence. Make sure that more items can be queued-in + // during the comparatively I/O expensive task of committing a + // layer. + // + // The conceptual benefit of this design is that caller can continue + // pulling layers after an early return. At any given time, only one + // caller is the "worker" routine committing layers. All other routines + // can continue pulling and queuing in layers. + s.lock.Lock() + s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{ + BlobInfo: blob, + EmptyLayer: emptyLayer, + } + + // We're still waiting for at least one previous/parent layer to be + // committed, so there's nothing to do. + if index != s.currentIndex { + s.lock.Unlock() + return nil + } + + for info := s.indexToPulledLayerInfo[index]; info != nil; info = s.indexToPulledLayerInfo[index] { + s.lock.Unlock() + // Note: commitLayer locks on-demand. + if err := s.commitLayer(ctx, *info, index); err != nil { + return err + } + s.lock.Lock() + index++ + } + + // Set the index at the very end to make sure that only one routine + // enters stage 2). + s.currentIndex = index + s.lock.Unlock() + return nil +} + +// commitLayer commits the specified blob with the given index to the storage. +// Note that the previous layer is expected to already be committed. +// +// Caution: this function must be called without holding `s.lock`. Callers +// must guarantee that, at any given time, at most one goroutine may execute +// `commitLayer()`. +func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error { + // Already committed? Return early. + if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted { + return nil + } + + // Start with an empty string or the previous layer ID. Note that + // `s.indexToStorageID` can only be accessed by *one* goroutine at any + // given time. Hence, we don't need to lock accesses. + var lastLayer string + if prev := s.indexToStorageID[index-1]; prev != nil { + lastLayer = *prev + } + + // Carry over the previous ID for empty non-base layers. + if blob.EmptyLayer { + s.indexToStorageID[index] = &lastLayer + return nil + } + + // Check if there's already a layer with the ID that we'd give to the result of applying + // this layer blob to its parent, if it has one, or the blob's hex value otherwise. + s.lock.Lock() + diffID, haveDiffID := s.blobDiffIDs[blob.Digest] + s.lock.Unlock() + if !haveDiffID { + // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), + // or to even check if we had it. 
+ // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller + // that relies on using a blob digest that has never been seen by the store had better call + // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only + // so far we are going to accommodate that (if we should be doing that at all). + logrus.Debugf("looking for diffID for blob %+v", blob.Digest) + // NOTE: use `TryReusingBlob` to prevent recursion. + has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false) + if err != nil { + return errors.Wrapf(err, "checking for a layer based on blob %q", blob.Digest.String()) + } + if !has { + return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) + } + diffID, haveDiffID = s.blobDiffIDs[blob.Digest] + if !haveDiffID { + return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) + } + } + id := diffID.Hex() + if lastLayer != "" { + id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() + } + if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { + // There's already a layer that should have the right contents, just reuse it. + lastLayer = layer.ID + s.indexToStorageID[index] = &lastLayer + return nil + } + + s.lock.Lock() + diffOutput, ok := s.diffOutputs[blob.Digest] + s.lock.Unlock() + if ok { + layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil) + if err != nil { + return err + } + + // FIXME: what to do with the uncompressed digest? + diffOutput.UncompressedDigest = blob.Digest + + if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil { + _ = s.imageRef.transport.store.Delete(layer.ID) + return err + } + + s.indexToStorageID[index] = &layer.ID + return nil + } + + s.lock.Lock() + al, ok := s.blobAdditionalLayer[blob.Digest] + s.lock.Unlock() + if ok { + layer, err := al.PutAs(id, lastLayer, nil) + if err != nil { + return errors.Wrapf(err, "failed to put layer from digest and labels") + } + lastLayer = layer.ID + s.indexToStorageID[index] = &lastLayer + return nil + } + + // Check if we previously cached a file with that blob's contents. If we didn't, + // then we need to read the desired contents from a layer. + s.lock.Lock() + filename, ok := s.filenames[blob.Digest] + s.lock.Unlock() + if !ok { + // Try to find the layer with contents matching that blobsum. + layer := "" + layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } else { + layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } + } + if layer == "" { + return errors.Wrapf(err2, "locating layer for blob %q", blob.Digest) + } + // Read the layer's contents. + noCompression := archive.Uncompressed + diffOptions := &storage.DiffOptions{ + Compression: &noCompression, + } + diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions) + if err2 != nil { + return errors.Wrapf(err2, "reading layer %q for blob %q", layer, blob.Digest) + } + // Copy the layer diff to a file. Diff() takes a lock that it holds + // until the ReadCloser that it returns is closed, and PutLayer() wants + // the same lock, so the diff can't just be directly streamed from one + // to the other. 
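+ // As a sketch of the constraint (a hypothetical call sequence, not code
+ // used by this package), streaming the diff straight into PutLayer would
+ // self-deadlock on the store lock:
+ //
+ // rc, _ := s.imageRef.transport.store.Diff("", layer, diffOptions) // takes the lock, held until rc.Close()
+ // defer rc.Close()
+ // _, _, _ = s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, rc) // waits for the same lock
+ //
+ // Hence the detour through a temporary file below.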
+ filename = s.computeNextBlobCacheFile() + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + diff.Close() + return errors.Wrapf(err, "creating temporary file %q", filename) + } + // Copy the data to the file. + // TODO: This can take quite some time, and should ideally be cancellable using + // ctx.Done(). + _, err = io.Copy(file, diff) + diff.Close() + file.Close() + if err != nil { + return errors.Wrapf(err, "storing blob to file %q", filename) + } + // Make sure that we can find this file later, should we need the layer's + // contents again. + s.lock.Lock() + s.filenames[blob.Digest] = filename + s.lock.Unlock() + } + // Read the cached blob and use it as a diff. + file, err := os.Open(filename) + if err != nil { + return errors.Wrapf(err, "opening file %q", filename) + } + defer file.Close() + // Build the new layer using the diff, regardless of where it came from. + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{ + OriginalDigest: blob.Digest, + UncompressedDigest: diffID, + }, file) + if err != nil && errors.Cause(err) != storage.ErrDuplicateID { + return errors.Wrapf(err, "adding layer with blob %q", blob.Digest) + } + + s.indexToStorageID[index] = &layer.ID + return nil +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + if len(s.manifest) == 0 { + return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") + } + toplevelManifest, _, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return errors.Wrapf(err, "retrieving top-level manifest") + } + // If the name we're saving to includes a digest, then check that the + // manifests that we're about to save all either match the one from the + // unparsedToplevel, or match the digest in the name that we're using. + if s.imageRef.named != nil { + if digested, ok := s.imageRef.named.(reference.Digested); ok { + matches, err := manifest.MatchesDigest(s.manifest, digested.Digest()) + if err != nil { + return err + } + if !matches { + matches, err = manifest.MatchesDigest(toplevelManifest, digested.Digest()) + if err != nil { + return err + } + } + if !matches { + return fmt.Errorf("Manifest to be saved does not match expected digest %s", digested.Digest()) + } + } + } + // Find the list of layer blobs. + man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) + if err != nil { + return errors.Wrapf(err, "parsing manifest") + } + layerBlobs := man.LayerInfos() + + // Extract, commit, or find the layers. 
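+ // In the common pull path every layer has already been committed by
+ // queueOrCommit() while it was being copied, so commitLayer() below will
+ // usually return early via s.indexToStorageID; the loop only does real
+ // work for layers that were never queued with an index.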
+ for i, blob := range layerBlobs { + if err := s.commitLayer(ctx, blob, i); err != nil { + return err + } + } + var lastLayer string + if len(layerBlobs) > 0 { // Can happen when using caches + prev := s.indexToStorageID[len(layerBlobs)-1] + if prev == nil { + return errors.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1) + } + lastLayer = *prev + } + + // If one of those blobs was a configuration blob, then we can try to dig out the date when the image + // was originally created, in case we're just copying it. If not, no harm done. + options := &storage.ImageOptions{} + if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil { + logrus.Debugf("setting image creation date to %s", inspect.Created) + options.CreationDate = *inspect.Created + } + // Create the image record, pointing to the most-recently added layer. + intendedID := s.imageRef.id + if intendedID == "" { + intendedID = s.computeID(man) + } + oldNames := []string{} + img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) + if err != nil { + if errors.Cause(err) != storage.ErrDuplicateID { + logrus.Debugf("error creating image: %q", err) + return errors.Wrapf(err, "creating image %q", intendedID) + } + img, err = s.imageRef.transport.store.Image(intendedID) + if err != nil { + return errors.Wrapf(err, "reading image %q", intendedID) + } + if img.TopLayer != lastLayer { + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) + return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) + } + logrus.Debugf("reusing image ID %q", img.ID) + oldNames = append(oldNames, img.Names...) + } else { + logrus.Debugf("created new image ID %q", img.ID) + } + + // Clean up the unfinished image on any error. + // (Is this the right thing to do if the image has existed before?) + commitSucceeded := false + defer func() { + if !commitSucceeded { + logrus.Errorf("Updating image %q (old names %v) failed, deleting it", img.ID, oldNames) + if _, err := s.imageRef.transport.store.DeleteImage(img.ID, true); err != nil { + logrus.Errorf("Error deleting incomplete image %q: %v", img.ID, err) + } + } + }() + + // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so + // we just need to screen out the ones that are actually layers to get the list of non-layers. + dataBlobs := make(map[digest.Digest]struct{}) + for blob := range s.filenames { + dataBlobs[blob] = struct{}{} + } + for _, layerBlob := range layerBlobs { + delete(dataBlobs, layerBlob.Digest) + } + for blob := range dataBlobs { + v, err := os.ReadFile(s.filenames[blob]) + if err != nil { + return errors.Wrapf(err, "copying non-layer blob %q to image", blob) + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil { + logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) + return errors.Wrapf(err, "saving big data %q for image %q", blob.String(), img.ID) + } + } + // Save the unparsedToplevel's manifest if it differs from the per-platform one, which is saved below. 
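+ // (Recording the top-level manifest as well is what later allows resolving
+ // this image by the digest of the original manifest list, not only by the
+ // digest of the per-platform manifest instance that was actually pulled.)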
+ if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
+ manifestDigest, err := manifest.Digest(toplevelManifest)
+ if err != nil {
+ return errors.Wrapf(err, "digesting top-level manifest")
+ }
+ key := manifestBigDataKey(manifestDigest)
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil {
+ logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err)
+ return errors.Wrapf(err, "saving top-level manifest for image %q", img.ID)
+ }
+ }
+ // Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
+ // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
+ // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
+ key := manifestBigDataKey(s.manifestDigest)
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
+ logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
+ return errors.Wrapf(err, "saving manifest for image %q", img.ID)
+ }
+ key = storage.ImageDigestBigDataKey
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
+ logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
+ return errors.Wrapf(err, "saving manifest for image %q", img.ID)
+ }
+ // Save the signatures, if we have any.
+ if len(s.signatures) > 0 {
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
+ logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
+ return errors.Wrapf(err, "saving signatures for image %q", img.ID)
+ }
+ }
+ for instanceDigest, signatures := range s.signatureses {
+ key := signatureBigDataKey(instanceDigest)
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil {
+ logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
+ return errors.Wrapf(err, "saving signatures for image %q", img.ID)
+ }
+ }
+ // Save our metadata.
+ metadata, err := json.Marshal(s)
+ if err != nil {
+ logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
+ return errors.Wrapf(err, "encoding metadata for image %q", img.ID)
+ }
+ if len(metadata) != 0 {
+ if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
+ logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
+ return errors.Wrapf(err, "saving metadata for image %q", img.ID)
+ }
+ logrus.Debugf("saved image metadata %q", string(metadata))
+ }
+ // Add the reference's name to the image. We don't need to worry about avoiding duplicate
+ // values because AddNames() will deduplicate the list that we pass to it.
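+ // AddNames() also preserves any names the image record already has, so an
+ // image pulled under several references accumulates all of them.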
+ if name := s.imageRef.DockerReference(); name != nil { + if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil { + return errors.Wrapf(err, "adding names %v to image %q", name, img.ID) + } + logrus.Debugf("added name %q to image %q", name, img.ID) + } + + commitSucceeded = true + return nil +} + +var manifestMIMETypes = []string{ + imgspecv1.MediaTypeImageManifest, + manifest.DockerV2Schema2MediaType, + manifest.DockerV2Schema1SignedMediaType, + manifest.DockerV2Schema1MediaType, +} + +func (s *storageImageDestination) SupportedManifestMIMETypes() []string { + return manifestMIMETypes +} + +// PutManifest writes the manifest to the destination. +func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error { + digest, err := manifest.Digest(manifestBlob) + if err != nil { + return err + } + newBlob := make([]byte, len(manifestBlob)) + copy(newBlob, manifestBlob) + s.manifest = newBlob + s.manifestDigest = digest + return nil +} + +// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was +// previously supplied to PutSignatures(). +func (s *storageImageDestination) SupportsSignatures(ctx context.Context) error { + return nil +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be +// uploaded to the image destination, true otherwise. +func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (s *storageImageDestination) MustMatchRuntimeOS() bool { + return true +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { + return true // Yes, we want the unmodified manifest +} + +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. +func (s *storageImageDestination) SupportsPutBlobPartial() bool { + return true +} + +// PutSignatures records the image's signatures for committing as a single data blob. +func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + sizes := []int{} + sigblob := []byte{} + for _, sig := range signatures { + sizes = append(sizes, len(sig)) + newblob := make([]byte, len(sigblob)+len(sig)) + copy(newblob, sigblob) + copy(newblob[len(sigblob):], sig) + sigblob = newblob + } + if instanceDigest == nil { + s.signatures = sigblob + s.SignatureSizes = sizes + if len(s.manifest) > 0 { + manifestDigest := s.manifestDigest + instanceDigest = &manifestDigest + } + } + if instanceDigest != nil { + s.signatureses[*instanceDigest] = sigblob + s.SignaturesSizes[*instanceDigest] = sizes + } + return nil +} + +// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) getSize() (int64, error) { + var sum int64 + // Size up the data blobs. 
+ dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID) + if err != nil { + return -1, errors.Wrapf(err, "reading image %q", s.image.ID) + } + for _, dataName := range dataNames { + bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName) + if err != nil { + return -1, errors.Wrapf(err, "reading data blob size %q for %q", dataName, s.image.ID) + } + sum += bigSize + } + // Add the signature sizes. + for _, sigSize := range s.SignatureSizes { + sum += int64(sigSize) + } + // Walk the layer list. + layerID := s.image.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return -1, err + } + if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { + return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) + } + sum += layer.UncompressedSize + if layer.Parent == "" { + break + } + layerID = layer.Parent + } + return sum, nil +} + +// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) Size() (int64, error) { + return s.getSize() +} + +// Size() returns the previously-computed size of the image, with no error. +func (s *storageImageCloser) Size() (int64, error) { + return s.size, nil +} + +// newImage creates an image that also knows its size +func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, s) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, sys, src) + if err != nil { + return nil, err + } + size, err := src.getSize() + if err != nil { + return nil, err + } + return &storageImageCloser{ImageCloser: img, size: size}, nil +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go new file mode 100644 index 00000000000..7c6da112c74 --- /dev/null +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -0,0 +1,288 @@ +//go:build !containers_image_storage_stub +// +build !containers_image_storage_stub + +package storage + +import ( + "context" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte +// value hex-encoded into a 64-character string, and a reference to a Store +// where an image is, or would be, kept. +// Either "named" or "id" must be set. 
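+//
+// For illustration (hypothetical values): a reference may carry a named value
+// such as docker.io/library/busybox:latest with an empty ID, only an ID (the
+// 64-character hex string) with named == nil, or both at once.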
+type storageReference struct {
+ transport storageTransport
+ named reference.Named // may include a tag and/or a digest
+ id string
+}
+
+func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) {
+ if named == nil && id == "" {
+ return nil, ErrInvalidReference
+ }
+ if named != nil && reference.IsNameOnly(named) {
+ return nil, errors.Wrapf(ErrInvalidReference, "reference %s has neither a tag nor a digest", named.String())
+ }
+ if id != "" {
+ if err := validateImageID(id); err != nil {
+ return nil, errors.Wrapf(ErrInvalidReference, "invalid ID value %q: %v", id, err)
+ }
+ }
+ // We take a copy of the transport, which contains a pointer to the
+ // store that it used for resolving this reference, so that the
+ // transport that we'll return from Transport() won't be affected by
+ // further calls to the original transport's SetStore() method.
+ return &storageReference{
+ transport: transport,
+ named: named,
+ id: id,
+ }, nil
+}
+
+// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref
+func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
+ repo := ref.Name()
+ for _, name := range image.Names {
+ if named, err := reference.ParseNormalizedNamed(name); err == nil {
+ if named.Name() == repo {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// multiArchImageMatchesSystemContext returns true if the passed-in image both contains a
+// multi-arch manifest that matches the passed-in digest, and the image is the per-platform
+// image instance that matches sys.
+//
+// See the comment in storageReference.ResolveImage explaining why
+// this check is necessary.
+func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image, manifestDigest digest.Digest, sys *types.SystemContext) bool {
+ // Load the manifest that matches the specified digest.
+ // We don't need to care about storage.ImageDigestBigDataKey because
+ // manifest lists are only stored into storage by c/image versions
+ // that know about manifestBigDataKey, and only using that key.
+ key := manifestBigDataKey(manifestDigest)
+ manifestBytes, err := store.ImageBigData(img.ID, key)
+ if err != nil {
+ return false
+ }
+ // The manifest is either a list, or not a list. If it's a list, find
+ // the digest of the instance that matches the current system, and try
+ // to load that manifest from the image record, and use it.
+ manifestType := manifest.GuessMIMEType(manifestBytes)
+ if !manifest.MIMETypeIsMultiImage(manifestType) {
+ // manifestDigest directly specifies a per-platform image, so we aren't
+ // choosing among different variants.
+ return false
+ }
+ list, err := manifest.ListFromBlob(manifestBytes, manifestType)
+ if err != nil {
+ return false
+ }
+ chosenInstance, err := list.ChooseInstance(sys)
+ if err != nil {
+ return false
+ }
+ key = manifestBigDataKey(chosenInstance)
+ _, err = store.ImageBigData(img.ID, key)
+ return err == nil // true if img.ID is based on chosenInstance.
+}
+
+// Resolve the reference's name to an image ID in the store, if there's already
+// one present with the same name or ID, and return the image.
+func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Image, error) {
+ var loadedImage *storage.Image
+ if s.id == "" && s.named != nil {
+ // Look for an image that has the expanded reference name as an explicit Name value.
+ image, err := s.transport.store.Image(s.named.String()) + if image != nil && err == nil { + loadedImage = image + s.id = image.ID + } + } + if s.id == "" && s.named != nil { + if digested, ok := s.named.(reference.Digested); ok { + // Look for an image with the specified digest that has the same name, + // though possibly with a different tag or digest, as a Name value, so + // that the canonical reference can be implicitly resolved to the image. + // + // Typically there should be at most one such image, because the same + // manifest digest implies the same config, and we choose the storage ID + // based on the config (deduplicating images), except: + // - the user can explicitly specify an ID when creating the image. + // In this case we don't have a preference among the alternatives. + // - when pulling an image from a multi-platform manifest list, we also + // store the manifest list in the image; this allows referencing a + // per-platform image using the manifest list digest, but that also + // means that we can have multiple genuinely different images in the + // storage matching the same manifest list digest (if pulled using different + // SystemContext.{OS,Architecture,Variant}Choice to the same storage). + // In this case we prefer the image matching the current SystemContext. + images, err := s.transport.store.ImagesByDigest(digested.Digest()) + if err == nil && len(images) > 0 { + for _, image := range images { + if imageMatchesRepo(image, s.named) { + if loadedImage == nil || multiArchImageMatchesSystemContext(s.transport.store, image, digested.Digest(), sys) { + loadedImage = image + s.id = image.ID + } + } + } + } + } + } + if s.id == "" { + logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) + return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport()) + } + if loadedImage == nil { + img, err := s.transport.store.Image(s.id) + if err != nil { + return nil, errors.Wrapf(err, "reading image %q", s.id) + } + loadedImage = img + } + if s.named != nil { + if !imageMatchesRepo(loadedImage, s.named) { + logrus.Errorf("no image matching reference %q found", s.StringWithinTransport()) + return nil, ErrNoSuchImage + } + } + // Default to having the image digest that we hand back match the most recently + // added manifest... + if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok { + loadedImage.Digest = digest + } + // ... unless the named reference says otherwise, and it matches one of the digests + // in the image. For those cases, set the Digest field to that value, for the + // sake of older consumers that don't know there's a whole list in there now. + if s.named != nil { + if digested, ok := s.named.(reference.Digested); ok { + for _, digest := range loadedImage.Digests { + if digest == digested.Digest() { + loadedImage.Digest = digest + break + } + } + } + } + return loadedImage, nil +} + +// Return a Transport object that defaults to using the same store that we used +// to build this reference object. +func (s storageReference) Transport() types.ImageTransport { + return &storageTransport{ + store: s.transport.store, + defaultUIDMap: s.transport.defaultUIDMap, + defaultGIDMap: s.transport.defaultGIDMap, + } +} + +// Return a name with a tag or digest, if we have either, else return it bare. 
+func (s storageReference) DockerReference() reference.Named { + return s.named +} + +// Return a name with a tag, prefixed with the graph root and driver name, to +// disambiguate between images which may be present in multiple stores and +// share only their names. +func (s storageReference) StringWithinTransport() string { + optionsList := "" + options := s.transport.store.GraphOptions() + if len(options) > 0 { + optionsList = ":" + strings.Join(options, ",") + } + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" + if s.named != nil { + res = res + s.named.String() + } + if s.id != "" { + res = res + "@" + s.id + } + return res +} + +func (s storageReference) PolicyConfigurationIdentity() string { + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" + if s.named != nil { + res = res + s.named.String() + } + if s.id != "" { + res = res + "@" + s.id + } + return res +} + +// Also accept policy that's tied to the combination of the graph root and +// driver name, to apply to all images stored in the Store, and to just the +// graph root, in case we're using multiple drivers in the same directory for +// some reason. +func (s storageReference) PolicyConfigurationNamespaces() []string { + storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" + driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" + namespaces := []string{} + if s.named != nil { + if s.id != "" { + // The reference without the ID is also a valid namespace. + namespaces = append(namespaces, storeSpec+s.named.String()) + } + tagged, isTagged := s.named.(reference.Tagged) + _, isDigested := s.named.(reference.Digested) + if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace. + namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag()) + } + components := strings.Split(s.named.Name(), "/") + for len(components) > 0 { + namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) + components = components[:len(components)-1] + } + } + namespaces = append(namespaces, storeSpec) + namespaces = append(namespaces, driverlessStoreSpec) + return namespaces +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
+func (s storageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ return newImage(ctx, sys, s)
+}
+
+func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ img, err := s.resolveImage(sys)
+ if err != nil {
+ return err
+ }
+ layers, err := s.transport.store.DeleteImage(img.ID, true)
+ if err == nil {
+ logrus.Debugf("deleted image %q", img.ID)
+ for _, layer := range layers {
+ logrus.Debugf("deleted layer %q", layer)
+ }
+ }
+ return err
+}
+
+func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, sys, s)
+}
+
+func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(sys, s)
+}
diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go
new file mode 100644
index 00000000000..07393ee7431
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go
@@ -0,0 +1,388 @@
+//go:build !containers_image_storage_stub
+// +build !containers_image_storage_stub
+
+package storage
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/idtools"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ minimumTruncatedIDLength = 3
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+var (
+ // Transport is an ImageTransport that uses either a default
+ // storage.Store or one that it's explicitly told to use.
+ Transport StoreTransport = &storageTransport{}
+ // ErrInvalidReference is returned when ParseReference() is passed an
+ // empty reference.
+ ErrInvalidReference = errors.New("invalid reference")
+ // ErrPathNotAbsolute is returned when a graph root is not an absolute
+ // path name.
+ ErrPathNotAbsolute = errors.New("path name is not absolute")
+)
+
+// StoreTransport is an ImageTransport that uses a storage.Store to parse
+// references, either its own default or one that it's told to use.
+type StoreTransport interface {
+ types.ImageTransport
+ // SetStore sets the default store for this transport.
+ SetStore(storage.Store)
+ // GetStoreIfSet returns the default store for this transport, or nil if not set/determined yet.
+ GetStoreIfSet() storage.Store
+ // GetImage retrieves the image from the transport's store that's named
+ // by the reference.
+ GetImage(types.ImageReference) (*storage.Image, error)
+ // GetStoreImage retrieves the image from a specified store that's named
+ // by the reference.
+ GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
+ // ParseStoreReference parses a reference, overriding any store
+ // specification that it may contain.
+ ParseStoreReference(store storage.Store, reference string) (*storageReference, error)
+ // NewStoreReference creates a reference for (named@ID) in store.
+ // either of name or ID can be unset; named must not be a reference.IsNameOnly.
+ NewStoreReference(store storage.Store, named reference.Named, id string) (*storageReference, error)
+ // SetDefaultUIDMap sets the default UID map to use when opening stores.
+ SetDefaultUIDMap(idmap []idtools.IDMap) + // SetDefaultGIDMap sets the default GID map to use when opening stores. + SetDefaultGIDMap(idmap []idtools.IDMap) + // DefaultUIDMap returns the default UID map used when opening stores. + DefaultUIDMap() []idtools.IDMap + // DefaultGIDMap returns the default GID map used when opening stores. + DefaultGIDMap() []idtools.IDMap +} + +type storageTransport struct { + store storage.Store + defaultUIDMap []idtools.IDMap + defaultGIDMap []idtools.IDMap +} + +func (s *storageTransport) Name() string { + // Still haven't really settled on a name. + return "containers-storage" +} + +// SetStore sets the Store object which the Transport will use for parsing +// references when information about a Store is not directly specified as part +// of the reference. If one is not set, the library will attempt to initialize +// one with default settings when a reference needs to be parsed. Calling +// SetStore does not affect previously parsed references. +func (s *storageTransport) SetStore(store storage.Store) { + s.store = store +} + +// GetStoreIfSet returns the default store for this transport, as set using SetStore() or initialized by default, or nil if not set/determined yet. +func (s *storageTransport) GetStoreIfSet() storage.Store { + return s.store +} + +// SetDefaultUIDMap sets the default UID map to use when opening stores. +func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) { + s.defaultUIDMap = idmap +} + +// SetDefaultGIDMap sets the default GID map to use when opening stores. +func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) { + s.defaultGIDMap = idmap +} + +// DefaultUIDMap returns the default UID map used when opening stores. +func (s *storageTransport) DefaultUIDMap() []idtools.IDMap { + return s.defaultUIDMap +} + +// DefaultGIDMap returns the default GID map used when opening stores. +func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { + return s.defaultGIDMap +} + +// ParseStoreReference takes a name or an ID, tries to figure out which it is +// relative to the given store, and returns it in a reference object. +func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { + if ref == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref) + } + if ref[0] == '[' { + // Ignore the store specifier. + closeIndex := strings.IndexRune(ref, ']') + if closeIndex < 1 { + return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) + } + ref = ref[closeIndex+1:] + } + + // The reference may end with an image ID. Image IDs and digests use the same "@" separator; + // here we only peel away an image ID, and leave digests alone. + split := strings.LastIndex(ref, "@") + id := "" + if split != -1 { + possibleID := ref[split+1:] + if possibleID == "" { + return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref) + } + // If it looks like a digest, leave it alone for now. + if _, err := digest.Parse(possibleID); err != nil { + // Otherwise… + if err := validateImageID(possibleID); err == nil { + id = possibleID // … it is a full ID + } else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) { + // … it is a truncated version of the ID of an image that's present in local storage, + // so we might as well use the expanded value. 
+ id = img.ID
+ } else {
+ return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID)
+ }
+ // We have recognized an image ID; peel it off.
+ ref = ref[:split]
+ }
+ }
+
+ // If we only have one @-delimited portion, then _maybe_ it's a truncated image ID. Only check on that if it's
+ // at least of what we guess is a reasonable minimum length, because we don't want a really short value
+ // like "a" matching an image by ID prefix when the input was actually meant to specify an image name.
+ if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") {
+ if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) {
+ // It's a truncated version of the ID of an image that's present in local storage;
+ // we need to expand it.
+ id = img.ID
+ ref = ""
+ }
+ }
+
+ var named reference.Named
+ // Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been
+ // completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest.
+ if ref != "" {
+ var err error
+ named, err = reference.ParseNormalizedNamed(ref)
+ if err != nil {
+ return nil, errors.Wrapf(err, "parsing named reference %q", ref)
+ }
+ named = reference.TagNameOnly(named)
+ }
+
+ result, err := s.NewStoreReference(store, named, id)
+ if err != nil {
+ return nil, err
+ }
+ logrus.Debugf("parsed reference into %q", result.StringWithinTransport())
+ return result, nil
+}
+
+// NewStoreReference creates a reference for (named@ID) in store.
+// either of name or ID can be unset; named must not be a reference.IsNameOnly.
+func (s *storageTransport) NewStoreReference(store storage.Store, named reference.Named, id string) (*storageReference, error) {
+ return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id)
+}
+
+func (s *storageTransport) GetStore() (storage.Store, error) {
+ // Return the transport's previously-set store. If we don't have one
+ // of those, initialize one now.
+ if s.store == nil {
+ options, err := storage.DefaultStoreOptionsAutoDetectUID()
+ if err != nil {
+ return nil, err
+ }
+ options.UIDMap = s.defaultUIDMap
+ options.GIDMap = s.defaultGIDMap
+ store, err := storage.GetStore(options)
+ if err != nil {
+ return nil, err
+ }
+ s.store = store
+ }
+ return s.store, nil
+}
+
+// ParseReference takes a name and a tag or digest and/or ID
+// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"),
+// possibly prefixed with a store specifier in the form "[_graphroot_]" or
+// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
+// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
+// tries to figure out which it is, and returns it in a reference object.
+// If _id_ is the ID of an image that's present in local storage, it can be truncated, and
+// even be specified as if it were a _name_ value.
+func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
+ var store storage.Store
+ // Check if there's a store location prefix. If there is, then it
+ // needs to match a store that was previously initialized using
+ // storage.GetStore(), or be enough to let the storage library fill out
+ // the rest using knowledge that it has from elsewhere.
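+ // For illustration (hypothetical paths), all of the following are accepted
+ // here:
+ //
+ // busybox:latest
+ // [overlay@/var/lib/containers/storage+/run/containers/storage]busybox:latest
+ // [/var/lib/containers/storage]@0123456789ab (a truncated image ID, accepted if such an image exists locally)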
+ if len(reference) > 0 && reference[0] == '[' { + closeIndex := strings.IndexRune(reference, ']') + if closeIndex < 1 { + return nil, ErrInvalidReference + } + storeSpec := reference[1:closeIndex] + reference = reference[closeIndex+1:] + // Peel off a "driver@" from the start. + driverInfo := "" + driverSplit := strings.SplitN(storeSpec, "@", 2) + if len(driverSplit) != 2 { + if storeSpec == "" { + return nil, ErrInvalidReference + } + } else { + driverInfo = driverSplit[0] + if driverInfo == "" { + return nil, ErrInvalidReference + } + storeSpec = driverSplit[1] + if storeSpec == "" { + return nil, ErrInvalidReference + } + } + // Peel off a ":options" from the end. + var options []string + optionsSplit := strings.SplitN(storeSpec, ":", 2) + if len(optionsSplit) == 2 { + options = strings.Split(optionsSplit[1], ",") + storeSpec = optionsSplit[0] + } + // Peel off a "+runroot" from the new end. + runRootInfo := "" + runRootSplit := strings.SplitN(storeSpec, "+", 2) + if len(runRootSplit) == 2 { + runRootInfo = runRootSplit[1] + storeSpec = runRootSplit[0] + } + // The rest is our graph root. + rootInfo := storeSpec + // Check that any paths are absolute paths. + if rootInfo != "" && !filepath.IsAbs(rootInfo) { + return nil, ErrPathNotAbsolute + } + if runRootInfo != "" && !filepath.IsAbs(runRootInfo) { + return nil, ErrPathNotAbsolute + } + store2, err := storage.GetStore(storage.StoreOptions{ + GraphDriverName: driverInfo, + GraphRoot: rootInfo, + RunRoot: runRootInfo, + GraphDriverOptions: options, + UIDMap: s.defaultUIDMap, + GIDMap: s.defaultGIDMap, + }) + if err != nil { + return nil, err + } + store = store2 + } else { + // We didn't have a store spec, so use the default. + store2, err := s.GetStore() + if err != nil { + return nil, err + } + store = store2 + } + return s.ParseStoreReference(store, reference) +} + +func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { + dref := ref.DockerReference() + if dref != nil { + if img, err := store.Image(dref.String()); err == nil { + return img, nil + } + } + if sref, ok := ref.(*storageReference); ok { + tmpRef := *sref + if img, err := tmpRef.resolveImage(nil); err == nil { + return img, nil + } + } + return nil, storage.ErrImageUnknown +} + +func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { + store, err := s.GetStore() + if err != nil { + return nil, err + } + return s.GetStoreImage(store, ref) +} + +func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { + // Check that there's a store location prefix. Values we're passed are + // expected to come from PolicyConfigurationIdentity or + // PolicyConfigurationNamespaces, so if there's no store location, + // something's wrong. + if scope[0] != '[' { + return ErrInvalidReference + } + // Parse the store location prefix. + closeIndex := strings.IndexRune(scope, ']') + if closeIndex < 1 { + return ErrInvalidReference + } + storeSpec := scope[1:closeIndex] + scope = scope[closeIndex+1:] + storeInfo := strings.SplitN(storeSpec, "@", 2) + if len(storeInfo) == 1 && storeInfo[0] != "" { + // One component: the graph root. + if !filepath.IsAbs(storeInfo[0]) { + return ErrPathNotAbsolute + } + } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { + // Two components: the driver type and the graph root. + if !filepath.IsAbs(storeInfo[1]) { + return ErrPathNotAbsolute + } + } else { + // Anything else: scope specified in a form we don't + // recognize. 
+ return ErrInvalidReference
+ }
+ // That might be all of it, and that's okay.
+ if scope == "" {
+ return nil
+ }
+
+ fields := strings.SplitN(scope, "@", 3)
+ switch len(fields) {
+ case 1: // name only
+ case 2: // name:tag@ID or name[:tag]@digest
+ if idErr := validateImageID(fields[1]); idErr != nil {
+ if _, digestErr := digest.Parse(fields[1]); digestErr != nil {
+ return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error())
+ }
+ }
+ case 3: // name[:tag]@digest@ID
+ if _, err := digest.Parse(fields[1]); err != nil {
+ return err
+ }
+ if err := validateImageID(fields[2]); err != nil {
+ return err
+ }
+ default: // Coverage: This should never happen
+ return errors.New("Internal error: unexpected number of fields from strings.SplitN")
+ }
+ // As for fields[0], if it is non-empty at all:
+ // FIXME? We could be verifying the various character set and length restrictions
+ // from docker/distribution/reference.regexp.go, but other than that there
+ // are few semantically invalid strings.
+ return nil
+}
+
+// validateImageID returns nil if id is a valid (full) image ID, or an error
+func validateImageID(id string) error {
+ _, err := digest.Parse("sha256:" + id)
+ return err
+}
diff --git a/vendor/github.com/containers/image/v5/tarball/doc.go b/vendor/github.com/containers/image/v5/tarball/doc.go
new file mode 100644
index 00000000000..e9d321b8f87
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/tarball/doc.go
@@ -0,0 +1,60 @@
+// Package tarball provides a way to generate images using one or more layer
+// tarballs and an optional template configuration.
+//
+// An example:
+// package main
+//
+// import (
+// "context"
+//
+// cp "github.com/containers/image/v5/copy"
+// "github.com/containers/image/v5/signature"
+// "github.com/containers/image/v5/tarball"
+// "github.com/containers/image/v5/transports/alltransports"
+// "github.com/containers/image/v5/types"
+// imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+// )
+//
+// func imageFromTarball() {
+// src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
+// // - or -
+// // src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz")
+// if err != nil {
+// panic(err)
+// }
+// updater, ok := src.(tarball.ConfigUpdater)
+// if !ok {
+// panic("unexpected: a tarball reference should implement tarball.ConfigUpdater")
+// }
+// config := imgspecv1.Image{
+// Config: imgspecv1.ImageConfig{
+// Cmd: []string{"/bin/bash"},
+// },
+// }
+// annotations := make(map[string]string)
+// annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache"
+// err = updater.ConfigUpdate(config, annotations)
+// if err != nil {
+// panic(err)
+// }
+// dest, err := alltransports.ParseImageName("docker-daemon:mock:latest")
+// if err != nil {
+// panic(err)
+// }
+//
+// policy, err := signature.DefaultPolicy(nil)
+// if err != nil {
+// panic(err)
+// }
+//
+// pc, err := signature.NewPolicyContext(policy)
+// if err != nil {
+// panic(err)
+// }
+// defer pc.Destroy()
+// _, err = cp.Image(context.TODO(), pc, dest, src, nil)
+// if err != nil {
+// panic(err)
+// }
+// }
+package tarball
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go
new file mode 100644
index 00000000000..23f67c49e62
--- /dev/null
+++
b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go @@ -0,0 +1,93 @@ +package tarball + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/types" + + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ConfigUpdater is an interface that ImageReferences for "tarball" images also +// implement. It can be used to set values for a configuration, and to set +// image annotations which will be present in the images returned by the +// reference's NewImage() or NewImageSource() methods. +type ConfigUpdater interface { + ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error +} + +type tarballReference struct { + config imgspecv1.Image + annotations map[string]string + filenames []string + stdin []byte +} + +// ConfigUpdate updates the image's default configuration and adds annotations +// which will be visible in source images created using this reference. +func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error { + r.config = config + if r.annotations == nil { + r.annotations = make(map[string]string) + } + for k, v := range annotations { + r.annotations[k] = v + } + return nil +} + +func (r *tarballReference) Transport() types.ImageTransport { + return Transport +} + +func (r *tarballReference) StringWithinTransport() string { + return strings.Join(r.filenames, ":") +} + +func (r *tarballReference) DockerReference() reference.Named { + return nil +} + +func (r *tarballReference) PolicyConfigurationIdentity() string { + return "" +} + +func (r *tarballReference) PolicyConfigurationNamespaces() []string { + return nil +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
+func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := r.NewImageSource(ctx, sys) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, sys, src) + if err != nil { + src.Close() + return nil, err + } + return img, nil +} + +func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + for _, filename := range r.filenames { + if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("error removing %q: %v", filename, err) + } + } + return nil +} + +func (r *tarballReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return nil, fmt.Errorf(`"tarball:" locations can only be read from, not written to`) +} diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go new file mode 100644 index 00000000000..aedfdf5de65 --- /dev/null +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -0,0 +1,273 @@ +package tarball + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os" + "runtime" + "strings" + "time" + + "github.com/containers/image/v5/types" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + imgspecs "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type tarballImageSource struct { + reference tarballReference + filenames []string + diffIDs []digest.Digest + diffSizes []int64 + blobIDs []digest.Digest + blobSizes []int64 + blobTypes []string + config []byte + configID digest.Digest + configSize int64 + manifest []byte +} + +func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + // Gather up the digests, sizes, and date information for all of the files. + filenames := []string{} + diffIDs := []digest.Digest{} + diffSizes := []int64{} + blobIDs := []digest.Digest{} + blobSizes := []int64{} + blobTimes := []time.Time{} + blobTypes := []string{} + for _, filename := range r.filenames { + var file *os.File + var err error + var blobSize int64 + var blobTime time.Time + var reader io.Reader + if filename == "-" { + blobSize = int64(len(r.stdin)) + blobTime = time.Now() + reader = bytes.NewReader(r.stdin) + } else { + file, err = os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) + } + defer file.Close() + reader = file + fileinfo, err := file.Stat() + if err != nil { + return nil, fmt.Errorf("error reading size of %q: %v", filename, err) + } + blobSize = fileinfo.Size() + blobTime = fileinfo.ModTime() + } + + // Default to assuming the layer is compressed. + layerType := imgspecv1.MediaTypeImageLayerGzip + + // Set up to digest the file as it is. + blobIDdigester := digest.Canonical.Digester() + reader = io.TeeReader(reader, blobIDdigester.Hash()) + + // Set up to digest the file after we maybe decompress it. 
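+ // The two digesters end up chained through TeeReaders, so one pass over
+ // the input computes both digests: blobIDdigester sees the raw (possibly
+ // compressed) bytes, while diffIDdigester sees the bytes after any
+ // decompression.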
+ diffIDdigester := digest.Canonical.Digester() + uncompressed, err := pgzip.NewReader(reader) + if err == nil { + // It is compressed, so the diffID is the digest of the uncompressed version + reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) + } else { + // It is not compressed, so the diffID and the blobID are going to be the same + diffIDdigester = blobIDdigester + layerType = imgspecv1.MediaTypeImageLayer + uncompressed = nil + } + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + n, err := io.Copy(io.Discard, reader) + if err != nil { + return nil, fmt.Errorf("error reading %q: %v", filename, err) + } + if uncompressed != nil { + uncompressed.Close() + } + + // Grab our uncompressed and possibly-compressed digests and sizes. + filenames = append(filenames, filename) + diffIDs = append(diffIDs, diffIDdigester.Digest()) + diffSizes = append(diffSizes, n) + blobIDs = append(blobIDs, blobIDdigester.Digest()) + blobSizes = append(blobSizes, blobSize) + blobTimes = append(blobTimes, blobTime) + blobTypes = append(blobTypes, layerType) + } + + // Build the rootfs and history for the configuration blob. + rootfs := imgspecv1.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + created := time.Time{} + history := []imgspecv1.History{} + // Pick up the layer comment from the configuration's history list, if one is set. + comment := "imported from tarball" + if len(r.config.History) > 0 && r.config.History[0].Comment != "" { + comment = r.config.History[0].Comment + } + for i := range diffIDs { + createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) + history = append(history, imgspecv1.History{ + Created: &blobTimes[i], + CreatedBy: createdBy, + Comment: comment, + }) + // Use the mtime of the most recently modified file as the image's creation time. + if created.Before(blobTimes[i]) { + created = blobTimes[i] + } + } + + // Pick up other defaults from the config in the reference. + config := r.config + if config.Created == nil { + config.Created = &created + } + if config.Architecture == "" { + config.Architecture = runtime.GOARCH + } + if config.OS == "" { + config.OS = runtime.GOOS + } + config.RootFS = rootfs + config.History = history + + // Encode and digest the image configuration blob. + configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) + } + configID := digest.Canonical.FromBytes(configBytes) + configSize := int64(len(configBytes)) + + // Populate a manifest with the configuration blob and the file as the single layer. + layerDescriptors := []imgspecv1.Descriptor{} + for i := range blobIDs { + layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ + Digest: blobIDs[i], + Size: blobSizes[i], + MediaType: blobTypes[i], + }) + } + annotations := make(map[string]string) + for k, v := range r.annotations { + annotations[k] = v + } + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{ + SchemaVersion: 2, + }, + Config: imgspecv1.Descriptor{ + Digest: configID, + Size: configSize, + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: layerDescriptors, + Annotations: annotations, + } + + // Encode the manifest. + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) + } + + // Return the image. 
+ src := &tarballImageSource{ + reference: *r, + filenames: filenames, + diffIDs: diffIDs, + diffSizes: diffSizes, + blobIDs: blobIDs, + blobSizes: blobSizes, + blobTypes: blobTypes, + config: configBytes, + configID: configID, + configSize: configSize, + manifest: manifestBytes, + } + + return src, nil +} + +func (is *tarballImageSource) Close() error { + return nil +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (is *tarballImageSource) HasThreadSafeGetBlob() bool { + return false +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + // We should only be asked about things in the manifest. Maybe the configuration blob. + if blobinfo.Digest == is.configID { + return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil + } + // Maybe one of the layer blobs. + for i := range is.blobIDs { + if blobinfo.Digest == is.blobIDs[i] { + // We want to read that layer: open the file or memory block and hand it back. + if is.filenames[i] == "-" { + return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + } + reader, err := os.Open(is.filenames[i]) + if err != nil { + return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) + } + return reader, is.blobSizes[i], nil + } + } + return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return is.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as there can be no secondary manifests. +func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return nil, nil +} + +func (is *tarballImageSource) Reference() types.ImageReference { + return &is.reference +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. 
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (*tarballImageSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go new file mode 100644 index 00000000000..63d835530b5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go @@ -0,0 +1,75 @@ +package tarball + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +const ( + transportName = "tarball" + separator = ":" +) + +var ( + // Transport implements the types.ImageTransport interface for "tarball:" images, + // which are makeshift images constructed using one or more possibly-compressed tar + // archives. + Transport = &tarballTransport{} +) + +type tarballTransport struct { +} + +func (t *tarballTransport) Name() string { + return transportName +} + +func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) { + var stdin []byte + var err error + filenames := strings.Split(reference, separator) + for _, filename := range filenames { + if filename == "-" { + stdin, err = io.ReadAll(os.Stdin) + if err != nil { + return nil, fmt.Errorf("error buffering stdin: %v", err) + } + continue + } + f, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q: %v", filename, err) + } + f.Close() + } + return NewReference(filenames, stdin) +} + +// NewReference creates a new "tarball:" reference for the listed fileNames. +// If any of the fileNames is "-", the contents of stdin are used instead. +func NewReference(fileNames []string, stdin []byte) (types.ImageReference, error) { + for _, path := range fileNames { + if strings.Contains(path, separator) { + return nil, fmt.Errorf("Invalid path %q: paths including the separator %q are not supported", path, separator) + } + } + return &tarballReference{ + filenames: fileNames, + stdin: stdin, + }, nil +} + +func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + return errors.New(`tarball: does not support any scopes except the default "" one`) +} + +func init() { + transports.Register(Transport) +} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go new file mode 100644 index 00000000000..0bae8b2599d --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -0,0 +1,48 @@ +package alltransports + +import ( + "strings" + + // register all known transports + // NOTE: Make sure docs/containers-policy.json.5.md is updated when adding or updating + // a transport. 
+ _ "github.com/containers/image/v5/directory" + _ "github.com/containers/image/v5/docker" + _ "github.com/containers/image/v5/docker/archive" + _ "github.com/containers/image/v5/oci/archive" + _ "github.com/containers/image/v5/oci/layout" + _ "github.com/containers/image/v5/openshift" + _ "github.com/containers/image/v5/sif" + _ "github.com/containers/image/v5/tarball" + + // The ostree transport is registered by ostree*.go + // The storage transport is registered by storage*.go + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +// ParseImageName converts a URL-like image name to a types.ImageReference. +func ParseImageName(imgName string) (types.ImageReference, error) { + // Keep this in sync with TransportFromImageName! + parts := strings.SplitN(imgName, ":", 2) + if len(parts) != 2 { + return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) + } + transport := transports.Get(parts[0]) + if transport == nil { + return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0]) + } + return transport.ParseReference(parts[1]) +} + +// TransportFromImageName converts an URL-like name to a types.ImageTransport or nil when +// the transport is unknown or when the input is invalid. +func TransportFromImageName(imageName string) types.ImageTransport { + // Keep this in sync with ParseImageName! + parts := strings.SplitN(imageName, ":", 2) + if len(parts) == 2 { + return transports.Get(parts[0]) + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go new file mode 100644 index 00000000000..ffac6e0b8a3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go @@ -0,0 +1,9 @@ +//go:build !containers_image_docker_daemon_stub +// +build !containers_image_docker_daemon_stub + +package alltransports + +import ( + // Register the docker-daemon transport + _ "github.com/containers/image/v5/docker/daemon" +) diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go new file mode 100644 index 00000000000..ddc347bf35d --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go @@ -0,0 +1,10 @@ +//go:build containers_image_docker_daemon_stub +// +build containers_image_docker_daemon_stub + +package alltransports + +import "github.com/containers/image/v5/transports" + +func init() { + transports.Register(transports.NewStubTransport("docker-daemon")) +} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go new file mode 100644 index 00000000000..2340702bdc5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go @@ -0,0 +1,9 @@ +//go:build containers_image_ostree && linux +// +build containers_image_ostree,linux + +package alltransports + +import ( + // Register the ostree transport + _ "github.com/containers/image/v5/ostree" +) diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go new file mode 100644 index 00000000000..8c4175188f0 --- /dev/null +++ 
b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go @@ -0,0 +1,10 @@ +//go:build !containers_image_ostree || !linux +// +build !containers_image_ostree !linux + +package alltransports + +import "github.com/containers/image/v5/transports" + +func init() { + transports.Register(transports.NewStubTransport("ostree")) +} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go new file mode 100644 index 00000000000..1e399cdb024 --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go @@ -0,0 +1,9 @@ +//go:build !containers_image_storage_stub +// +build !containers_image_storage_stub + +package alltransports + +import ( + // Register the storage transport + _ "github.com/containers/image/v5/storage" +) diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go new file mode 100644 index 00000000000..30802661f17 --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go @@ -0,0 +1,10 @@ +//go:build containers_image_storage_stub +// +build containers_image_storage_stub + +package alltransports + +import "github.com/containers/image/v5/transports" + +func init() { + transports.Register(transports.NewStubTransport("containers-storage")) +} diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index c928b87abdc..6295b549305 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -8,7 +8,7 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 21 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. 
VersionDev = "" diff --git a/vendor/github.com/containers/ocicrypt/MAINTAINERS b/vendor/github.com/containers/ocicrypt/MAINTAINERS index e6a7d1f0a7d..af38d03bffb 100644 --- a/vendor/github.com/containers/ocicrypt/MAINTAINERS +++ b/vendor/github.com/containers/ocicrypt/MAINTAINERS @@ -3,3 +3,4 @@ # Github ID, Name, Email Address lumjjb, Brandon Lum, lumjjb@gmail.com stefanberger, Stefan Berger, stefanb@linux.ibm.com +arronwy, Arron Wang, arron.wang@intel.com diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go index 448e88c7cd9..7d80f5f844b 100644 --- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/pkcs11helpers.go @@ -40,8 +40,6 @@ import ( var ( // OAEPLabel defines the label we use for OAEP encryption; this cannot be changed OAEPLabel = []byte("") - // OAEPDefaultHash defines the default hash used for OAEP encryption; this cannot be changed - OAEPDefaultHash = "sha1" // OAEPSha1Params describes the OAEP parameters with sha1 hash algorithm; needed by SoftHSM OAEPSha1Params = &pkcs11.OAEPParams{ @@ -69,12 +67,12 @@ func rsaPublicEncryptOAEP(pubKey *rsa.PublicKey, plaintext []byte) ([]byte, stri ) oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG") - // The default is 'sha1' + // The default is sha256 (previously was sha1) switch strings.ToLower(oaephash) { - case "sha1", "": + case "sha1": hashfunc = sha1.New() hashalg = "sha1" - case "sha256": + case "sha256", "": hashfunc = sha256.New() hashalg = "sha256" default: @@ -283,12 +281,12 @@ func publicEncryptOAEP(pubKey *Pkcs11KeyFileObject, plaintext []byte) ([]byte, s var oaep *pkcs11.OAEPParams oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG") - // the default is sha1 + // The default is sha256 (previously was sha1) switch strings.ToLower(oaephash) { - case "sha1", "": + case "sha1": oaep = OAEPSha1Params hashalg = "sha1" - case "sha256": + case "sha256", "": oaep = OAEPSha256Params hashalg = "sha256" default: @@ -333,7 +331,7 @@ func privateDecryptOAEP(privKeyObj *Pkcs11KeyFileObject, ciphertext []byte, hash var oaep *pkcs11.OAEPParams - // the default is sha1 + // An empty string from the Hash in the JSON historically defaults to sha1. switch hashalg { case "sha1", "": oaep = OAEPSha1Params @@ -410,9 +408,6 @@ func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { return nil, err } - if hashalg == OAEPDefaultHash { - hashalg = "" - } recipient := Pkcs11Recipient{ Version: 0, Blob: base64.StdEncoding.EncodeToString(ciphertext), @@ -431,15 +426,18 @@ func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) { // { // "version": 0, // "blob": , -// "hash": +// "hash": // } , // { // "version": 0, // "blob": , -// "hash": +// "hash": // } , // [...] // } +// Note: More recent versions of this code explicitly write 'sha1' +// while older versions left it empty in case of 'sha1'. +// func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) { pkcs11blob := Pkcs11Blob{} err := json.Unmarshal(pkcs11blobstr, &pkcs11blob) diff --git a/vendor/github.com/containers/podman/v4/LICENSE b/vendor/github.com/containers/podman/v4/LICENSE new file mode 100644 index 00000000000..9b259bdfcf9 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
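To make the newly vendored transport wiring above concrete, here is a minimal usage sketch (not part of the vendored sources; the archive path is hypothetical): importing alltransports registers the tarball transport via the blank imports shown earlier, and ParseImageName resolves the "tarball:" prefix to it.

	package main

	import (
		"fmt"

		// Importing alltransports registers the tarball transport (among
		// others) through the blank imports in alltransports.go.
		"github.com/containers/image/v5/transports/alltransports"
	)

	func main() {
		// "tarball:" references are colon-separated file names, so paths
		// containing ":" are rejected; "/tmp/layer.tar" is a hypothetical
		// (possibly gzip-compressed) tar archive that must exist on disk.
		ref, err := alltransports.ParseImageName("tarball:/tmp/layer.tar")
		if err != nil {
			panic(err)
		}
		fmt.Println(ref.Transport().Name()) // prints "tarball"
	}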
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/annotations.go b/vendor/github.com/containers/podman/v4/libpod/define/annotations.go
new file mode 100644
index 00000000000..8f52799812b
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/annotations.go
@@ -0,0 +1,152 @@
+package define
+
+const (
+	// InspectAnnotationCIDFile is used by Inspect to determine if a
+	// container ID file was created for the container.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationCIDFile = "io.podman.annotations.cid-file"
+	// InspectAnnotationAutoremove is used by Inspect to determine if a
+	// container will be automatically removed on exit.
+	// If an annotation with this key is found in the OCI spec and is one of
+	// the two supported boolean values (InspectResponseTrue and
+	// InspectResponseFalse) it will be used in the output of Inspect().
+	InspectAnnotationAutoremove = "io.podman.annotations.autoremove"
+	// InspectAnnotationVolumesFrom is used by Inspect to identify
+	// containers whose volumes are being used by this container.
+	// It is expected to be a comma-separated list of container names and/or
+	// IDs.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationVolumesFrom = "io.podman.annotations.volumes-from"
+	// InspectAnnotationPrivileged is used by Inspect to identify containers
+	// which are privileged (i.e., running with elevated privileges).
+	// It is expected to be a boolean, populated by one of
+	// InspectResponseTrue or InspectResponseFalse.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationPrivileged = "io.podman.annotations.privileged"
+	// InspectAnnotationPublishAll is used by Inspect to identify containers
+	// which have all the ports from their image published.
+	// It is expected to be a boolean, populated by one of
+	// InspectResponseTrue or InspectResponseFalse.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationPublishAll = "io.podman.annotations.publish-all"
+	// InspectAnnotationInit is used by Inspect to identify containers that
+	// mount an init binary in.
+	// It is expected to be a boolean, populated by one of
+	// InspectResponseTrue or InspectResponseFalse.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationInit = "io.podman.annotations.init"
+	// InspectAnnotationLabel is used by Inspect to identify containers with
+	// special SELinux-related settings. It is used to populate the output
+	// of the SecurityOpt setting.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationLabel = "io.podman.annotations.label"
+	// InspectAnnotationSeccomp is used by Inspect to identify containers
+	// with special Seccomp-related settings. It is used to populate the
+	// output of the SecurityOpt setting in Inspect.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationSeccomp = "io.podman.annotations.seccomp"
+	// InspectAnnotationApparmor is used by Inspect to identify containers
+	// with special Apparmor-related settings. It is used to populate the
+	// output of the SecurityOpt setting.
+ // If an annotation with this key is found in the OCI spec, it will be + // used in the output of Inspect(). + InspectAnnotationApparmor = "io.podman.annotations.apparmor" + + // InspectResponseTrue is a boolean True response for an inspect + // annotation. + InspectResponseTrue = "TRUE" + // InspectResponseFalse is a boolean False response for an inspect + // annotation. + InspectResponseFalse = "FALSE" + + // CheckpointAnnotationName is used by Container Checkpoint when creating a + // checkpoint image to specify the original human-readable name for the + // container. + CheckpointAnnotationName = "io.podman.annotations.checkpoint.name" + + // CheckpointAnnotationRawImageName is used by Container Checkpoint when + // creating a checkpoint image to specify the original unprocessed name of + // the image used to create the container (as specified by the user). + CheckpointAnnotationRawImageName = "io.podman.annotations.checkpoint.rawImageName" + + // CheckpointAnnotationRootfsImageID is used by Container Checkpoint when + // creating a checkpoint image to specify the original ID of the image used + // to create the container. + CheckpointAnnotationRootfsImageID = "io.podman.annotations.checkpoint.rootfsImageID" + + // CheckpointAnnotationRootfsImageName is used by Container Checkpoint when + // creating a checkpoint image to specify the original image name used to + // create the container. + CheckpointAnnotationRootfsImageName = "io.podman.annotations.checkpoint.rootfsImageName" + + // CheckpointAnnotationPodmanVersion is used by Container Checkpoint when + // creating a checkpoint image to specify the version of Podman used on the + // host where the checkpoint was created. + CheckpointAnnotationPodmanVersion = "io.podman.annotations.checkpoint.podman.version" + + // CheckpointAnnotationCriuVersion is used by Container Checkpoint when + // creating a checkpoint image to specify the version of CRIU used on the + // host where the checkpoint was created. + CheckpointAnnotationCriuVersion = "io.podman.annotations.checkpoint.criu.version" + + // CheckpointAnnotationRuntimeName is used by Container Checkpoint when + // creating a checkpoint image to specify the runtime used on the host where + // the checkpoint was created. + CheckpointAnnotationRuntimeName = "io.podman.annotations.checkpoint.runtime.name" + + // CheckpointAnnotationRuntimeVersion is used by Container Checkpoint when + // creating a checkpoint image to specify the version of runtime used on the + // host where the checkpoint was created. + CheckpointAnnotationRuntimeVersion = "io.podman.annotations.checkpoint.runtime.version" + + // CheckpointAnnotationConmonVersion is used by Container Checkpoint when + // creating a checkpoint image to specify the version of conmon used on + // the host where the checkpoint was created. + CheckpointAnnotationConmonVersion = "io.podman.annotations.checkpoint.conmon.version" + + // CheckpointAnnotationHostArch is used by Container Checkpoint when + // creating a checkpoint image to specify the CPU architecture of the host + // on which the checkpoint was created. + CheckpointAnnotationHostArch = "io.podman.annotations.checkpoint.host.arch" + + // CheckpointAnnotationHostKernel is used by Container Checkpoint when + // creating a checkpoint image to specify the kernel version used by the + // host where the checkpoint was created. 
+ CheckpointAnnotationHostKernel = "io.podman.annotations.checkpoint.host.kernel" + + // CheckpointAnnotationCgroupVersion is used by Container Checkpoint when + // creating a checkpoint image to specify the cgroup version used by the + // host where the checkpoint was created. + CheckpointAnnotationCgroupVersion = "io.podman.annotations.checkpoint.cgroups.version" + + // CheckpointAnnotationDistributionVersion is used by Container Checkpoint + // when creating a checkpoint image to specify the version of host + // distribution on which the checkpoint was created. + CheckpointAnnotationDistributionVersion = "io.podman.annotations.checkpoint.distribution.version" + + // CheckpointAnnotationDistributionName is used by Container Checkpoint when + // creating a checkpoint image to specify the name of host distribution on + // which the checkpoint was created. + CheckpointAnnotationDistributionName = "io.podman.annotations.checkpoint.distribution.name" + // MaxKubeAnnotation is the max length of annotations allowed by Kubernetes. + MaxKubeAnnotation = 63 +) + +// IsReservedAnnotation returns true if the specified value corresponds to an +// already reserved annotation that Podman sets during container creation. +func IsReservedAnnotation(value string) bool { + switch value { + case InspectAnnotationCIDFile, InspectAnnotationAutoremove, InspectAnnotationVolumesFrom, InspectAnnotationPrivileged, InspectAnnotationPublishAll, InspectAnnotationInit, InspectAnnotationLabel, InspectAnnotationSeccomp, InspectAnnotationApparmor, InspectResponseTrue, InspectResponseFalse: + return true + + default: + return false + } +} diff --git a/vendor/github.com/containers/podman/v4/libpod/define/checkpoint_restore.go b/vendor/github.com/containers/podman/v4/libpod/define/checkpoint_restore.go new file mode 100644 index 00000000000..536bdde9a3c --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/define/checkpoint_restore.go @@ -0,0 +1,32 @@ +package define + +// This contains values reported by CRIU during +// checkpointing or restoring. +// All names are the same as reported by CRIU. 
+type CRIUCheckpointRestoreStatistics struct {
+	// Checkpoint values
+	// Time required to freeze/pause/quiesce the processes
+	FreezingTime uint32 `json:"freezing_time,omitempty"`
+	// Time the processes are actually not running during checkpointing
+	FrozenTime uint32 `json:"frozen_time,omitempty"`
+	// Time required to extract memory pages from the processes
+	MemdumpTime uint32 `json:"memdump_time,omitempty"`
+	// Time required to write memory pages to disk
+	MemwriteTime uint32 `json:"memwrite_time,omitempty"`
+	// Number of memory pages CRIU analyzed
+	PagesScanned uint64 `json:"pages_scanned,omitempty"`
+	// Number of memory pages written
+	PagesWritten uint64 `json:"pages_written,omitempty"`
+
+	// Restore values
+	// Number of pages compared during restore
+	PagesCompared uint64 `json:"pages_compared,omitempty"`
+	// Number of COW pages skipped during restore
+	PagesSkippedCow uint64 `json:"pages_skipped_cow,omitempty"`
+	// Time required to fork processes
+	ForkingTime uint32 `json:"forking_time,omitempty"`
+	// Time required to restore
+	RestoreTime uint32 `json:"restore_time,omitempty"`
+	// Number of memory pages restored
+	PagesRestored uint64 `json:"pages_restored,omitempty"`
+}
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/config.go b/vendor/github.com/containers/podman/v4/libpod/define/config.go
new file mode 100644
index 00000000000..0181bd31ce0
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/config.go
@@ -0,0 +1,95 @@
+package define
+
+import (
+	"bufio"
+	"io"
+	"regexp"
+
+	"github.com/containers/common/libnetwork/types"
+)
+
+var (
+	// DefaultSHMLockPath is the default path for SHM locks
+	DefaultSHMLockPath = "/libpod_lock"
+	// DefaultRootlessSHMLockPath is the default path for rootless SHM locks
+	DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
+
+	// NameRegex is a regular expression to validate container/pod names.
+	// This must NOT be changed from outside of Libpod. It should be a
+	// constant, but Go won't let us do that.
+	NameRegex = types.NameRegex
+	// RegexError is thrown in the presence of an invalid container/pod name.
+	RegexError = types.RegexError
+	// UmaskRegex is a regular expression to validate Umask.
+	UmaskRegex = regexp.MustCompile(`^[0-7]{1,4}$`)
+)
+
+const (
+	// DefaultTransport is a prefix that we apply to an image name
+	// to check Docker Hub first for the image
+	DefaultTransport = "docker://"
+)
+
+// InfoData holds the info type, i.e. store, host, etc., and the data for each type
+type InfoData struct {
+	Type string
+	Data map[string]interface{}
+}
+
+// VolumeDriverLocal is the "local" volume driver. It is managed by libpod
+// itself.
+const VolumeDriverLocal = "local" + +const ( + OCIManifestDir = "oci-dir" + OCIArchive = "oci-archive" + V2s2ManifestDir = "docker-dir" + V2s2Archive = "docker-archive" +) + +// AttachStreams contains streams that will be attached to the container +type AttachStreams struct { + // OutputStream will be attached to container's STDOUT + OutputStream io.WriteCloser + // ErrorStream will be attached to container's STDERR + ErrorStream io.WriteCloser + // InputStream will be attached to container's STDIN + InputStream *bufio.Reader + // AttachOutput is whether to attach to STDOUT + // If false, stdout will not be attached + AttachOutput bool + // AttachError is whether to attach to STDERR + // If false, stdout will not be attached + AttachError bool + // AttachInput is whether to attach to STDIN + // If false, stdout will not be attached + AttachInput bool +} + +// JournaldLogging is the string conmon expects to specify journald logging +const JournaldLogging = "journald" + +// KubernetesLogging is the string conmon expects when specifying to use the kubernetes logging format +const KubernetesLogging = "k8s-file" + +// JSONLogging is the string conmon expects when specifying to use the json logging format +const JSONLogging = "json-file" + +// NoLogging is the string conmon expects when specifying to use no log driver whatsoever +const NoLogging = "none" + +// PassthroughLogging is the string conmon expects when specifying to use the passthrough driver +const PassthroughLogging = "passthrough" + +// Strings used for --sdnotify option to podman +const ( + SdNotifyModeContainer = "container" + SdNotifyModeConmon = "conmon" + SdNotifyModeIgnore = "ignore" +) + +// DefaultRlimitValue is the value set by default for nofile and nproc +const RLimitDefaultValue = uint64(1048576) + +// BindMountPrefix distinguishes its annotations from others +const BindMountPrefix = "bind-mount-options:" diff --git a/vendor/github.com/containers/podman/v4/libpod/define/container.go b/vendor/github.com/containers/podman/v4/libpod/define/container.go new file mode 100644 index 00000000000..bb44a6a4a42 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/define/container.go @@ -0,0 +1,38 @@ +package define + +// Valid restart policy types. +const ( + // RestartPolicyNone indicates that no restart policy has been requested + // by a container. + RestartPolicyNone = "" + // RestartPolicyNo is identical in function to RestartPolicyNone. + RestartPolicyNo = "no" + // RestartPolicyAlways unconditionally restarts the container. + RestartPolicyAlways = "always" + // RestartPolicyOnFailure restarts the container on non-0 exit code, + // with an optional maximum number of retries. + RestartPolicyOnFailure = "on-failure" + // RestartPolicyUnlessStopped unconditionally restarts unless stopped + // by the user. It is identical to Always except with respect to + // handling of system restart, which Podman does not yet support. 
+ RestartPolicyUnlessStopped = "unless-stopped" +) + +// RestartPolicyMap maps between restart-policy valid values to restart policy types +var RestartPolicyMap = map[string]string{ + "none": RestartPolicyNone, + RestartPolicyNo: RestartPolicyNo, + RestartPolicyAlways: RestartPolicyAlways, + RestartPolicyOnFailure: RestartPolicyOnFailure, + RestartPolicyUnlessStopped: RestartPolicyUnlessStopped, +} + +// InitContainerTypes +const ( + // AlwaysInitContainer is an init container than runs on each + // pod start (including restart) + AlwaysInitContainer = "always" + // OneShotInitContainer is a container that only runs as init once + // and is then deleted. + OneShotInitContainer = "once" +) diff --git a/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go b/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go new file mode 100644 index 00000000000..6cdffb8b7a6 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go @@ -0,0 +1,761 @@ +package define + +import ( + "time" + + "github.com/containers/image/v5/manifest" +) + +type InspectIDMappings struct { + UIDMap []string `json:"UidMap"` + GIDMap []string `json:"GidMap"` +} + +// InspectContainerConfig holds further data about how a container was initially +// configured. +type InspectContainerConfig struct { + // Container hostname + Hostname string `json:"Hostname"` + // Container domain name - unused at present + DomainName string `json:"Domainname"` + // User the container was launched with + User string `json:"User"` + // Unused, at present + AttachStdin bool `json:"AttachStdin"` + // Unused, at present + AttachStdout bool `json:"AttachStdout"` + // Unused, at present + AttachStderr bool `json:"AttachStderr"` + // Whether the container creates a TTY + Tty bool `json:"Tty"` + // Whether the container leaves STDIN open + OpenStdin bool `json:"OpenStdin"` + // Whether STDIN is only left open once. + // Presently not supported by Podman, unused. + StdinOnce bool `json:"StdinOnce"` + // Container environment variables + Env []string `json:"Env"` + // Container command + Cmd []string `json:"Cmd"` + // Container image + Image string `json:"Image"` + // Unused, at present. I've never seen this field populated. + Volumes map[string]struct{} `json:"Volumes"` + // Container working directory + WorkingDir string `json:"WorkingDir"` + // Container entrypoint + Entrypoint string `json:"Entrypoint"` + // On-build arguments - presently unused. More of Buildah's domain. + OnBuild *string `json:"OnBuild"` + // Container labels + Labels map[string]string `json:"Labels"` + // Container annotations + Annotations map[string]string `json:"Annotations"` + // Container stop signal + StopSignal uint `json:"StopSignal"` + // Configured healthcheck for the container + Healthcheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"` + // CreateCommand is the full command plus arguments of the process the + // container has been created with. + CreateCommand []string `json:"CreateCommand,omitempty"` + // Timezone is the timezone inside the container. + // Local means it has the same timezone as the host machine + Timezone string `json:"Timezone,omitempty"` + // SystemdMode is whether the container is running in systemd mode. In + // systemd mode, the container configuration is customized to optimize + // running systemd in the container. + SystemdMode bool `json:"SystemdMode,omitempty"` + // Umask is the umask inside the container. 
+ Umask string `json:"Umask,omitempty"` + // Secrets are the secrets mounted in the container + Secrets []*InspectSecret `json:"Secrets,omitempty"` + // Timeout is time before container is killed by conmon + Timeout uint `json:"Timeout"` + // StopTimeout is time before container is stopped when calling stop + StopTimeout uint `json:"StopTimeout"` + // Passwd determines whether or not podman can add entries to /etc/passwd and /etc/group + Passwd *bool `json:"Passwd,omitempty"` + // ChrootDirs is an additional set of directories that need to be + // treated as root directories. Standard bind mounts will be mounted + // into paths relative to these directories. + ChrootDirs []string `json:"ChrootDirs,omitempty"` +} + +// InspectRestartPolicy holds information about the container's restart policy. +type InspectRestartPolicy struct { + // Name contains the container's restart policy. + // Allowable values are "no" or "" (take no action), + // "on-failure" (restart on non-zero exit code, with an optional max + // retry count), and "always" (always restart on container stop, unless + // explicitly requested by API). + // Note that this is NOT actually a name of any sort - the poor naming + // is for Docker compatibility. + Name string `json:"Name"` + // MaximumRetryCount is the maximum number of retries allowed if the + // "on-failure" restart policy is in use. Not used if "on-failure" is + // not set. + MaximumRetryCount uint `json:"MaximumRetryCount"` +} + +// InspectLogConfig holds information about a container's configured log driver +type InspectLogConfig struct { + Type string `json:"Type"` + Config map[string]string `json:"Config"` + // Path specifies a path to the log file + Path string `json:"Path"` + // Tag specifies a custom log tag for the container + Tag string `json:"Tag"` + // Size specifies a maximum size of the container log + Size string `json:"Size"` +} + +// InspectBlkioWeightDevice holds information about the relative weight +// of an individual device node. Weights are used in the I/O scheduler to give +// relative priority to some accesses. +type InspectBlkioWeightDevice struct { + // Path is the path to the device this applies to. + Path string `json:"Path"` + // Weight is the relative weight the scheduler will use when scheduling + // I/O. + Weight uint16 `json:"Weight"` +} + +// InspectBlkioThrottleDevice holds information about a speed cap for a device +// node. This cap applies to a specific operation (read, write, etc) on the given +// node. +type InspectBlkioThrottleDevice struct { + // Path is the path to the device this applies to. + Path string `json:"Path"` + // Rate is the maximum rate. It is in either bytes per second or iops + // per second, determined by where it is used - documentation will + // indicate which is appropriate. + Rate uint64 `json:"Rate"` +} + +// InspectUlimit is a ulimit that will be applied to the container. +type InspectUlimit struct { + // Name is the name (type) of the ulimit. + Name string `json:"Name"` + // Soft is the soft limit that will be applied. + Soft int64 `json:"Soft"` + // Hard is the hard limit that will be applied. + Hard int64 `json:"Hard"` +} + +// InspectDevice is a single device that will be mounted into the container. +type InspectDevice struct { + // PathOnHost is the path of the device on the host. + PathOnHost string `json:"PathOnHost"` + // PathInContainer is the path of the device within the container. 
+ PathInContainer string `json:"PathInContainer"` + // CgroupPermissions is the permissions of the mounted device. + // Presently not populated. + // TODO. + CgroupPermissions string `json:"CgroupPermissions"` +} + +// InspectHostPort provides information on a port on the host that a container's +// port is bound to. +type InspectHostPort struct { + // IP on the host we are bound to. "" if not specified (binding to all + // IPs). + HostIP string `json:"HostIp"` + // Port on the host we are bound to. No special formatting - just an + // integer stuffed into a string. + HostPort string `json:"HostPort"` +} + +// InspectMount provides a record of a single mount in a container. It contains +// fields for both named and normal volumes. Only user-specified volumes will be +// included, and tmpfs volumes are not included even if the user specified them. +type InspectMount struct { + // Whether the mount is a volume or bind mount. Allowed values are + // "volume" and "bind". + Type string `json:"Type"` + // The name of the volume. Empty for bind mounts. + Name string `json:"Name,omitempty"` + // The source directory for the volume. + Source string `json:"Source"` + // The destination directory for the volume. Specified as a path within + // the container, as it would be passed into the OCI runtime. + Destination string `json:"Destination"` + // The driver used for the named volume. Empty for bind mounts. + Driver string `json:"Driver"` + // Contains SELinux :z/:Z mount options. Unclear what, if anything, else + // goes in here. + Mode string `json:"Mode"` + // All remaining mount options. Additional data, not present in the + // original output. + Options []string `json:"Options"` + // Whether the volume is read-write + RW bool `json:"RW"` + // Mount propagation for the mount. Can be empty if not specified, but + // is always printed - no omitempty. + Propagation string `json:"Propagation"` +} + +// InspectContainerState provides a detailed record of a container's current +// state. It is returned as part of InspectContainerData. +// As with InspectContainerData, many portions of this struct are matched to +// Docker, but here we see more fields that are unused (nonsensical in the +// context of Libpod). +type InspectContainerState struct { + OciVersion string `json:"OciVersion"` + Status string `json:"Status"` + Running bool `json:"Running"` + Paused bool `json:"Paused"` + Restarting bool `json:"Restarting"` // TODO + OOMKilled bool `json:"OOMKilled"` + Dead bool `json:"Dead"` + Pid int `json:"Pid"` + ConmonPid int `json:"ConmonPid,omitempty"` + ExitCode int32 `json:"ExitCode"` + Error string `json:"Error"` // TODO + StartedAt time.Time `json:"StartedAt"` + FinishedAt time.Time `json:"FinishedAt"` + Health HealthCheckResults `json:"Health,omitempty"` + Checkpointed bool `json:"Checkpointed,omitempty"` + CgroupPath string `json:"CgroupPath,omitempty"` + CheckpointedAt time.Time `json:"CheckpointedAt,omitempty"` + RestoredAt time.Time `json:"RestoredAt,omitempty"` + CheckpointLog string `json:"CheckpointLog,omitempty"` + CheckpointPath string `json:"CheckpointPath,omitempty"` + RestoreLog string `json:"RestoreLog,omitempty"` + Restored bool `json:"Restored,omitempty"` +} + +// Healthcheck returns the HealthCheckResults. This is used for old podman compat +// to make the "Healthcheck" key available in the go template. 
+func (s *InspectContainerState) Healthcheck() HealthCheckResults { + return s.Health +} + +// HealthCheckResults describes the results/logs from a healthcheck +type HealthCheckResults struct { + // Status healthy or unhealthy + Status string `json:"Status"` + // FailingStreak is the number of consecutive failed healthchecks + FailingStreak int `json:"FailingStreak"` + // Log describes healthcheck attempts and results + Log []HealthCheckLog `json:"Log"` +} + +// HealthCheckLog describes the results of a single healthcheck +type HealthCheckLog struct { + // Start time as string + Start string `json:"Start"` + // End time as a string + End string `json:"End"` + // Exitcode is 0 or 1 + ExitCode int `json:"ExitCode"` + // Output is the stdout/stderr from the healthcheck command + Output string `json:"Output"` +} + +// InspectContainerHostConfig holds information used when the container was +// created. +// It's very much a Docker-specific struct, retained (mostly) as-is for +// compatibility. We fill individual fields as best as we can, inferring as much +// as possible from the spec and container config. +// Some things cannot be inferred. These will be populated by spec annotations +// (if available). +// Field names are fixed for compatibility and cannot be changed. +// As such, silence lint warnings about them. +//nolint +type InspectContainerHostConfig struct { + // Binds contains an array of user-added mounts. + // Both volume mounts and named volumes are included. + // Tmpfs mounts are NOT included. + // In 'docker inspect' this is separated into 'Binds' and 'Mounts' based + // on how a mount was added. We do not make this distinction and do not + // include a Mounts field in inspect. + // Format: :[:] + Binds []string `json:"Binds"` + // CgroupManager is the cgroup manager used by the container. + // At present, allowed values are either "cgroupfs" or "systemd". + CgroupManager string `json:"CgroupManager,omitempty"` + // CgroupMode is the configuration of the container's cgroup namespace. + // Populated as follows: + // private - a cgroup namespace has been created + // host - No cgroup namespace created + // container: - Using another container's cgroup namespace + // ns: - A path to a cgroup namespace has been specified + CgroupMode string `json:"CgroupMode"` + // ContainerIDFile is a file created during container creation to hold + // the ID of the created container. + // This is not handled within libpod and is stored in an annotation. + ContainerIDFile string `json:"ContainerIDFile"` + // LogConfig contains information on the container's logging backend + LogConfig *InspectLogConfig `json:"LogConfig"` + // NetworkMode is the configuration of the container's network + // namespace. + // Populated as follows: + // default - A network namespace is being created and configured via CNI + // none - A network namespace is being created, not configured via CNI + // host - No network namespace created + // container: - Using another container's network namespace + // ns: - A path to a network namespace has been specified + NetworkMode string `json:"NetworkMode"` + // PortBindings contains the container's port bindings. + // It is formatted as map[string][]InspectHostPort. + // The string key here is formatted as / + // and represents the container port. A single container port may be + // bound to multiple host ports (on different IPs). + PortBindings map[string][]InspectHostPort `json:"PortBindings"` + // RestartPolicy contains the container's restart policy. 
+	RestartPolicy *InspectRestartPolicy `json:"RestartPolicy"`
+	// AutoRemove is whether the container will be automatically removed on
+	// exiting.
+	// It is not handled directly within libpod and is stored in an
+	// annotation.
+	AutoRemove bool `json:"AutoRemove"`
+	// VolumeDriver is presently unused and is retained for Docker
+	// compatibility.
+	VolumeDriver string `json:"VolumeDriver"`
+	// VolumesFrom is a list of containers which this container uses volumes
+	// from. This is not handled directly within libpod and is stored in an
+	// annotation.
+	// It is formatted as an array of container names and IDs.
+	VolumesFrom []string `json:"VolumesFrom"`
+	// CapAdd is a list of capabilities added to the container.
+	// It is not directly stored by Libpod, and instead computed from the
+	// capabilities listed in the container's spec, compared against a set
+	// of default capabilities.
+	CapAdd []string `json:"CapAdd"`
+	// CapDrop is a list of capabilities removed from the container.
+	// It is not directly stored by libpod, and instead computed from the
+	// capabilities listed in the container's spec, compared against a set
+	// of default capabilities.
+	CapDrop []string `json:"CapDrop"`
+	// Dns is a list of DNS nameservers that will be added to the
+	// container's resolv.conf
+	Dns []string `json:"Dns"`
+	// DnsOptions is a list of DNS options that will be set in the
+	// container's resolv.conf
+	DnsOptions []string `json:"DnsOptions"`
+	// DnsSearch is a list of DNS search domains that will be set in the
+	// container's resolv.conf
+	DnsSearch []string `json:"DnsSearch"`
+	// ExtraHosts contains hosts that will be added to the container's
+	// /etc/hosts.
+	ExtraHosts []string `json:"ExtraHosts"`
+	// GroupAdd contains groups that the user inside the container will be
+	// added to.
+	GroupAdd []string `json:"GroupAdd"`
+	// IpcMode represents the configuration of the container's IPC
+	// namespace.
+	// Populated as follows:
+	// "" (empty string) - Default, an IPC namespace will be created
+	// host - No IPC namespace created
+	// container: - Using another container's IPC namespace
+	// ns: - A path to an IPC namespace has been specified
+	IpcMode string `json:"IpcMode"`
+	// Cgroup contains the container's cgroup. It is presently not
+	// populated.
+	// TODO.
+	Cgroup string `json:"Cgroup"`
+	// Cgroups contains the container's Cgroup mode.
+	// Allowed values are "default" (container is creating Cgroups) and
+	// "disabled" (container is not creating Cgroups).
+	// This is Libpod-specific and not included in `docker inspect`.
+	Cgroups string `json:"Cgroups"`
+	// Links is unused, and provided purely for Docker compatibility.
+	Links []string `json:"Links"`
+	// OOMScoreAdj is an adjustment that will be made to the container's OOM
+	// score.
+	OomScoreAdj int `json:"OomScoreAdj"`
+	// PidMode represents the configuration of the container's PID
+	// namespace.
+	// Populated as follows:
+	// "" (empty string) - Default, a PID namespace will be created
+	// host - No PID namespace created
+	// container: - Using another container's PID namespace
+	// ns: - A path to a PID namespace has been specified
+	PidMode string `json:"PidMode"`
+	// Privileged indicates whether the container is running with elevated
+	// privileges.
+	// This has a very specific meaning in the Docker sense, so it's very
+	// difficult to decode from the spec and config, and so is stored as an
+	// annotation.
+	Privileged bool `json:"Privileged"`
+	// PublishAllPorts indicates whether image ports are being published.
+	// This is not directly stored in libpod and is saved as an annotation.
+	PublishAllPorts bool `json:"PublishAllPorts"`
+	// ReadonlyRootfs is whether the container will be mounted read-only.
+	ReadonlyRootfs bool `json:"ReadonlyRootfs"`
+	// SecurityOpt is a list of security-related options that are set in the
+	// container.
+	SecurityOpt []string `json:"SecurityOpt"`
+	// Tmpfs is a list of tmpfs filesystems that will be mounted into the
+	// container.
+	// It is a map of destination path to options for the mount.
+	Tmpfs map[string]string `json:"Tmpfs"`
+	// UTSMode represents the configuration of the container's UTS
+	// namespace.
+	// Populated as follows:
+	// "" (empty string) - Default, a UTS namespace will be created
+	// host - no UTS namespace created
+	// container: - Using another container's UTS namespace
+	// ns: - A path to a UTS namespace has been specified
+	UTSMode string `json:"UTSMode"`
+	// UsernsMode represents the configuration of the container's user
+	// namespace.
+	// When running rootless, a user namespace is created outside of libpod
+	// to allow some privileged operations. This will not be reflected here.
+	// Populated as follows:
+	// "" (empty string) - No user namespace will be created
+	// private - The container will be run in a user namespace
+	// container: - Using another container's user namespace
+	// ns: - A path to a user namespace has been specified
+	// TODO Rootless has an additional 'keep-id' option, presently not
+	// reflected here.
+	UsernsMode string `json:"UsernsMode"`
+	// IDMappings is the UIDMapping and GIDMapping used within the container
+	IDMappings *InspectIDMappings `json:"IDMappings,omitempty"`
+	// ShmSize is the size of the container's SHM device.
+
+	ShmSize int64 `json:"ShmSize"`
+	// Runtime is provided purely for Docker compatibility.
+	// It is set unconditionally to "oci" as Podman does not presently
+	// support non-OCI runtimes.
+	Runtime string `json:"Runtime"`
+	// ConsoleSize is an array of 2 integers showing the size of the
+	// container's console.
+	// It is only set if the container is creating a terminal.
+	// TODO.
+	ConsoleSize []uint `json:"ConsoleSize"`
+	// Isolation is presently unused and provided solely for Docker
+	// compatibility.
+	Isolation string `json:"Isolation"`
+	// CpuShares indicates the CPU resources allocated to the container.
+	// It is a relative weight in the scheduler for assigning CPU time
+	// versus other Cgroups.
+	CpuShares uint64 `json:"CpuShares"`
+	// Memory indicates the memory resources allocated to the container.
+	// This is the limit (in bytes) of RAM the container may use.
+	Memory int64 `json:"Memory"`
+	// NanoCpus indicates the number of CPUs allocated to the container.
+	// It is an integer where one full CPU is indicated by 1000000000 (one
+	// billion).
+	// Thus, 2.5 CPUs (fractional portions of CPUs are allowed) would be
+	// 2500000000 (2.5 billion).
+	// In 'docker inspect' this is set exclusively of two further options in
+	// the output (CpuPeriod and CpuQuota) which are both used to implement
+	// this functionality.
+	// We can't distinguish here, so if CpuQuota is set to the default of
+	// 100000, we will set both CpuQuota, CpuPeriod, and NanoCpus. If
+	// CpuQuota is not the default, we will not set NanoCpus.
+	NanoCpus int64 `json:"NanoCpus"`
+	// CgroupParent is the Cgroup parent of the container.
+	// Only set if not default.
+ CgroupParent string `json:"CgroupParent"`
+ // BlkioWeight indicates the I/O resources allocated to the container.
+ // It is a relative weight in the scheduler for assigning I/O time
+ // versus other Cgroups.
+ BlkioWeight uint16 `json:"BlkioWeight"`
+ // BlkioWeightDevice is an array of I/O resource priorities for
+ // individual device nodes.
+ // Unfortunately, the spec only stores the device's Major/Minor numbers
+ // and not the path, which is used here.
+ // Fortunately, the kernel provides an interface for retrieving the path
+ // of a given node by major:minor at /sys/dev/. However, the exact path
+ // in use may not be what was used in the original CLI invocation -
+ // though it is guaranteed that the device node will be the same, and
+ // using the given path will be functionally identical.
+ BlkioWeightDevice []InspectBlkioWeightDevice `json:"BlkioWeightDevice"`
+ // BlkioDeviceReadBps is an array of I/O throttle parameters for
+ // individual device nodes.
+ // This specifically sets the read rate cap in bytes per second for
+ // device nodes.
+ // As with BlkioWeightDevice, we pull the path from /sys/dev, and we
+ // don't guarantee the path will be identical to the original (though
+ // the node will be).
+ BlkioDeviceReadBps []InspectBlkioThrottleDevice `json:"BlkioDeviceReadBps"`
+ // BlkioDeviceWriteBps is an array of I/O throttle parameters for
+ // individual device nodes.
+ // This specifically sets the write rate cap in bytes per second for
+ // device nodes.
+ // As with BlkioWeightDevice, we pull the path from /sys/dev, and we
+ // don't guarantee the path will be identical to the original (though
+ // the node will be).
+ BlkioDeviceWriteBps []InspectBlkioThrottleDevice `json:"BlkioDeviceWriteBps"`
+ // BlkioDeviceReadIOps is an array of I/O throttle parameters for
+ // individual device nodes.
+ // This specifically sets the read rate cap in iops per second for
+ // device nodes.
+ // As with BlkioWeightDevice, we pull the path from /sys/dev, and we
+ // don't guarantee the path will be identical to the original (though
+ // the node will be).
+ BlkioDeviceReadIOps []InspectBlkioThrottleDevice `json:"BlkioDeviceReadIOps"`
+ // BlkioDeviceWriteIOps is an array of I/O throttle parameters for
+ // individual device nodes.
+ // This specifically sets the write rate cap in iops per second for
+ // device nodes.
+ // As with BlkioWeightDevice, we pull the path from /sys/dev, and we
+ // don't guarantee the path will be identical to the original (though
+ // the node will be).
+ BlkioDeviceWriteIOps []InspectBlkioThrottleDevice `json:"BlkioDeviceWriteIOps"`
+ // CpuPeriod is the length of a CPU period in microseconds.
+ // It relates directly to CpuQuota.
+ CpuPeriod uint64 `json:"CpuPeriod"`
+ // CpuQuota is the amount of time (in microseconds) that a container
+ // can use the CPU in every CpuPeriod.
+ CpuQuota int64 `json:"CpuQuota"`
+ // CpuRealtimePeriod is the length of time (in microseconds) of the CPU
+ // realtime period. If set to 0, no time will be allocated to realtime
+ // tasks.
+ CpuRealtimePeriod uint64 `json:"CpuRealtimePeriod"`
+ // CpuRealtimeRuntime is the length of time (in microseconds) allocated
+ // for realtime tasks within every CpuRealtimePeriod.
+ CpuRealtimeRuntime int64 `json:"CpuRealtimeRuntime"`
+ // CpusetCpus is the set of CPUs that the container will execute
+ // on. Formatted as `0-3` or `0,2`. Default (if unset) is all CPUs.
+ CpusetCpus string `json:"CpusetCpus"`
+ // CpusetMems is the set of memory nodes the container will use.
+ // Formatted as `0-3` or `0,2`. Default (if unset) is all memory nodes.
+ CpusetMems string `json:"CpusetMems"`
+ // Devices is a list of device nodes that will be added to the
+ // container.
+ // These are stored in the OCI spec only as type, major, minor while we
+ // display the host path. We convert this with /sys/dev, but we cannot
+ // guarantee that the host path will be identical - only that the actual
+ // device will be.
+ Devices []InspectDevice `json:"Devices"`
+ // DiskQuota is the maximum amount of disk space the container may use
+ // (in bytes).
+ // Presently not populated.
+ // TODO.
+ DiskQuota uint64 `json:"DiskQuota"`
+ // KernelMemory is the maximum amount of memory the kernel will devote
+ // to the container.
+ KernelMemory int64 `json:"KernelMemory"`
+ // MemoryReservation is the reservation (soft limit) of memory available
+ // to the container. Soft limits are warnings only and can be exceeded.
+ MemoryReservation int64 `json:"MemoryReservation"`
+ // MemorySwap is the total limit for all memory available to the
+ // container, including swap. 0 indicates that there is no limit to the
+ // amount of memory available.
+ MemorySwap int64 `json:"MemorySwap"`
+ // MemorySwappiness is the willingness of the kernel to page container
+ // memory to swap. It is an integer from 0 to 100, with low numbers
+ // being more likely to be put into swap.
+ // -1, the default, will not set swappiness and use the system defaults.
+ MemorySwappiness int64 `json:"MemorySwappiness"`
+ // OomKillDisable indicates whether the kernel OOM killer is disabled
+ // for the container.
+ OomKillDisable bool `json:"OomKillDisable"`
+ // Init indicates whether the container has an init mounted into it.
+ Init bool `json:"Init,omitempty"`
+ // PidsLimit is the maximum number of PIDs that may be created within
+ // the container. 0, the default, indicates no limit.
+ PidsLimit int64 `json:"PidsLimit"`
+ // Ulimits is a set of ulimits that will be set within the container.
+ Ulimits []InspectUlimit `json:"Ulimits"`
+ // CpuCount is Windows-only and not presently implemented.
+ CpuCount uint64 `json:"CpuCount"`
+ // CpuPercent is Windows-only and not presently implemented.
+ CpuPercent uint64 `json:"CpuPercent"`
+ // IOMaximumIOps is Windows-only and not presently implemented.
+ IOMaximumIOps uint64 `json:"IOMaximumIOps"`
+ // IOMaximumBandwidth is Windows-only and not presently implemented.
+ IOMaximumBandwidth uint64 `json:"IOMaximumBandwidth"`
+ // CgroupConf is the configuration for cgroup v2.
+ CgroupConf map[string]string `json:"CgroupConf"`
+}
+
+// Address represents an IP address.
+type Address struct {
+ Addr string
+ PrefixLength int
+}
+
+// InspectBasicNetworkConfig holds basic configuration information (e.g. IP
+// addresses, MAC address, subnet masks, etc) that are common for all networks
+// (both additional and main).
+type InspectBasicNetworkConfig struct {
+ // EndpointID is unused, maintained exclusively for compatibility.
+ EndpointID string `json:"EndpointID"`
+ // Gateway is the IP address of the gateway this network will use.
+ Gateway string `json:"Gateway"`
+ // IPAddress is the IP address for this network.
+ IPAddress string `json:"IPAddress"`
+ // IPPrefixLen is the length of the subnet mask of this network.
+ IPPrefixLen int `json:"IPPrefixLen"`
+ // SecondaryIPAddresses is a list of extra IP Addresses that the
+ // container has been assigned in this network.
+ SecondaryIPAddresses []Address `json:"SecondaryIPAddresses,omitempty"`
+ // IPv6Gateway is the IPv6 gateway this network will use.
+ IPv6Gateway string `json:"IPv6Gateway"`
+ // GlobalIPv6Address is the global-scope IPv6 Address for this network.
+ GlobalIPv6Address string `json:"GlobalIPv6Address"`
+ // GlobalIPv6PrefixLen is the length of the subnet mask of this network.
+ GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen"`
+ // SecondaryIPv6Addresses is a list of extra IPv6 Addresses that the
+ // container has been assigned in this network.
+ SecondaryIPv6Addresses []Address `json:"SecondaryIPv6Addresses,omitempty"`
+ // MacAddress is the MAC address for the interface in this network.
+ MacAddress string `json:"MacAddress"`
+ // AdditionalMacAddresses is a set of additional MAC Addresses beyond
+ // the first. CNI may configure more than one interface for a single
+ // network, which can cause this.
+ AdditionalMacAddresses []string `json:"AdditionalMACAddresses,omitempty"`
+}
+
+// InspectAdditionalNetwork holds information about non-default CNI networks the
+// container has been connected to.
+// As with InspectNetworkSettings, many fields are unused and maintained only
+// for compatibility with Docker.
+type InspectAdditionalNetwork struct {
+ InspectBasicNetworkConfig
+
+ // NetworkID is the name of the network this container is connected to.
+ NetworkID string `json:"NetworkID,omitempty"`
+ // DriverOpts is presently unused and maintained exclusively for
+ // compatibility.
+ DriverOpts map[string]string `json:"DriverOpts"`
+ // IPAMConfig is presently unused and maintained exclusively for
+ // compatibility.
+ IPAMConfig map[string]string `json:"IPAMConfig"`
+ // Links is presently unused and maintained exclusively for
+ // compatibility.
+ Links []string `json:"Links"`
+ // Aliases are any network aliases the container has in this network.
+ Aliases []string `json:"Aliases,omitempty"`
+}
+
+// InspectNetworkSettings holds information about the network settings of the
+// container.
+// Many fields are maintained only for compatibility with `docker inspect` and
+// are unused within Libpod.
+type InspectNetworkSettings struct {
+ InspectBasicNetworkConfig
+
+ Bridge string `json:"Bridge"`
+ SandboxID string `json:"SandboxID"`
+ HairpinMode bool `json:"HairpinMode"`
+ LinkLocalIPv6Address string `json:"LinkLocalIPv6Address"`
+ LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen"`
+ Ports map[string][]InspectHostPort `json:"Ports"`
+ SandboxKey string `json:"SandboxKey"`
+ // Networks contains information on non-default CNI networks this
+ // container has joined.
+ // It is a map of network name to network information.
+ Networks map[string]*InspectAdditionalNetwork `json:"Networks,omitempty"`
+}
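[Reviewer note, not part of the vendored files: since these inspect types are plain JSON-serializable structs, code outside libpod can decode `podman inspect` output straight into them. A minimal sketch, assuming a `podman` binary on $PATH and a hypothetical container name `my-ctr`:]

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"

	"github.com/containers/podman/v4/libpod/define"
)

func main() {
	// `podman inspect` prints a JSON array with one element per target.
	out, err := exec.Command("podman", "inspect", "--type", "container", "my-ctr").Output()
	if err != nil {
		panic(err)
	}
	var data []define.InspectContainerData
	if err := json.Unmarshal(out, &data); err != nil {
		panic(err)
	}
	for _, d := range data {
		// NetworkSettings embeds InspectBasicNetworkConfig, so the primary
		// IP address is available directly on the settings struct.
		fmt.Println(d.Name, d.NetworkSettings.IPAddress)
	}
}
```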
+
+// InspectContainerData provides a detailed record of a container's configuration
+// and state as viewed by Libpod.
+// Large portions of this structure are defined such that the output is
+// compatible with `docker inspect` JSON, but additional fields have been added
+// as required to share information not in the original output.
+type InspectContainerData struct {
+ ID string `json:"Id"`
+ Created time.Time `json:"Created"`
+ Path string `json:"Path"`
+ Args []string `json:"Args"`
+ State *InspectContainerState `json:"State"`
+ Image string `json:"Image"`
+ ImageName string `json:"ImageName"`
+ Rootfs string `json:"Rootfs"`
+ Pod string `json:"Pod"`
+ ResolvConfPath string `json:"ResolvConfPath"`
+ HostnamePath string `json:"HostnamePath"`
+ HostsPath string `json:"HostsPath"`
+ StaticDir string `json:"StaticDir"`
+ OCIConfigPath string `json:"OCIConfigPath,omitempty"`
+ OCIRuntime string `json:"OCIRuntime,omitempty"`
+ ConmonPidFile string `json:"ConmonPidFile"`
+ PidFile string `json:"PidFile"`
+ Name string `json:"Name"`
+ RestartCount int32 `json:"RestartCount"`
+ Driver string `json:"Driver"`
+ MountLabel string `json:"MountLabel"`
+ ProcessLabel string `json:"ProcessLabel"`
+ AppArmorProfile string `json:"AppArmorProfile"`
+ EffectiveCaps []string `json:"EffectiveCaps"`
+ BoundingCaps []string `json:"BoundingCaps"`
+ ExecIDs []string `json:"ExecIDs"`
+ GraphDriver *DriverData `json:"GraphDriver"`
+ SizeRw *int64 `json:"SizeRw,omitempty"`
+ SizeRootFs int64 `json:"SizeRootFs,omitempty"`
+ Mounts []InspectMount `json:"Mounts"`
+ Dependencies []string `json:"Dependencies"`
+ NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"`
+ Namespace string `json:"Namespace"`
+ IsInfra bool `json:"IsInfra"`
+ Config *InspectContainerConfig `json:"Config"`
+ HostConfig *InspectContainerHostConfig `json:"HostConfig"`
+}
+
+// InspectExecSession contains information about a given exec session.
+type InspectExecSession struct {
+ // CanRemove is legacy and used purely for compatibility reasons.
+ // Will always be set to true, unless the exec session is running.
+ CanRemove bool `json:"CanRemove"`
+ // ContainerID is the ID of the container this exec session is attached
+ // to.
+ ContainerID string `json:"ContainerID"`
+ // DetachKeys are the detach keys used by the exec session.
+ // If set to "" the default keys are being used.
+ // Will show "" if no detach keys are set.
+ DetachKeys string `json:"DetachKeys"`
+ // ExitCode is the exit code of the exec session. Will be set to 0 if
+ // the exec session has not yet exited.
+ ExitCode int `json:"ExitCode"`
+ // ID is the ID of the exec session.
+ ID string `json:"ID"`
+ // OpenStderr is whether the container's STDERR stream will be attached.
+ // Always set to true if the exec session created a TTY.
+ OpenStderr bool `json:"OpenStderr"`
+ // OpenStdin is whether the container's STDIN stream will be attached.
+ OpenStdin bool `json:"OpenStdin"`
+ // OpenStdout is whether the container's STDOUT stream will be attached.
+ // Always set to true if the exec session created a TTY.
+ OpenStdout bool `json:"OpenStdout"`
+ // Running is whether the exec session is running.
+ Running bool `json:"Running"`
+ // Pid is the PID of the exec session's process.
+ // Will be set to 0 if the exec session is not running.
+ Pid int `json:"Pid"`
+ // ProcessConfig contains information about the exec session's process.
+ ProcessConfig *InspectExecProcess `json:"ProcessConfig"`
+}
+
+// InspectExecProcess contains information about the process in a given exec
+// session.
+type InspectExecProcess struct {
+ // Arguments are the arguments to the entrypoint command of the exec
+ // session.
+ Arguments []string `json:"arguments"`
+ // Entrypoint is the entrypoint for the exec session (the command that
+ // will be executed in the container).
+ Entrypoint string `json:"entrypoint"`
+ // Privileged is whether the exec session will be started with elevated
+ // privileges.
+ Privileged bool `json:"privileged"`
+ // Tty is whether the exec session created a terminal.
+ Tty bool `json:"tty"`
+ // User is the user the exec session was started as.
+ User string `json:"user"`
+}
+
+// DriverData handles the data for a storage driver
+type DriverData struct {
+ Name string `json:"Name"`
+ Data map[string]string `json:"Data"`
+}
+
+// InspectSecret contains information on secrets mounted inside the container
+type InspectSecret struct {
+ // Name is the name of the secret
+ Name string `json:"Name"`
+ // ID is the ID of the secret
+ ID string `json:"ID"`
+ // UID is the UID of the mounted secret file
+ UID uint32 `json:"UID"`
+ // GID is the GID of the mounted secret file
+ GID uint32 `json:"GID"`
+ // Mode is the mode of the mounted secret file
+ Mode uint32 `json:"Mode"`
+}
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go b/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go
new file mode 100644
index 00000000000..9ad3aec08ed
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go
@@ -0,0 +1,152 @@
+package define
+
+import (
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+// ContainerStatus represents the current state of a container
+type ContainerStatus int
+
+const (
+ // ContainerStateUnknown indicates that the container is in an error
+ // state where information about it cannot be retrieved
+ ContainerStateUnknown ContainerStatus = iota
+ // ContainerStateConfigured indicates that the container has had its
+ // storage configured but it has not been created in the OCI runtime
+ ContainerStateConfigured ContainerStatus = iota
+ // ContainerStateCreated indicates the container has been created in
+ // the OCI runtime but not started
+ ContainerStateCreated ContainerStatus = iota
+ // ContainerStateRunning indicates the container is currently executing
+ ContainerStateRunning ContainerStatus = iota
+ // ContainerStateStopped indicates that the container was running but has
+ // exited
+ ContainerStateStopped ContainerStatus = iota
+ // ContainerStatePaused indicates that the container has been paused
+ ContainerStatePaused ContainerStatus = iota
+ // ContainerStateExited indicates that the container has stopped and been
+ // cleaned up
+ ContainerStateExited ContainerStatus = iota
+ // ContainerStateRemoving indicates the container is in the process of
+ // being removed.
+ ContainerStateRemoving ContainerStatus = iota
+ // ContainerStateStopping indicates the container is in the process of
+ // being stopped.
+ ContainerStateStopping ContainerStatus = iota
+)
+
+// String returns a string representation for users of a container
+// state. All results should match Docker's versions (from `docker ps`) as
+// closely as possible, given the different set of states we support.
+func (t ContainerStatus) String() string {
+ switch t {
+ case ContainerStateUnknown:
+ return "unknown"
+ case ContainerStateConfigured:
+ // The naming here is confusing, but it's necessary for Docker
+ // compatibility - their Created state is our Configured state.
+ return "created"
+ case ContainerStateCreated:
+ // Docker does not have an equivalent to this state, so give it
+ // a clear name. Most of the time this is a purely transitory
+ // state between Configured and Running so we don't expect to
+ // see it much anyways.
+ return "initialized" + case ContainerStateRunning: + return "running" + case ContainerStateStopped: + return "stopped" + case ContainerStatePaused: + return "paused" + case ContainerStateExited: + return "exited" + case ContainerStateRemoving: + return "removing" + case ContainerStateStopping: + return "stopping" + } + return "bad state" +} + +// StringToContainerStatus converts a string representation of a containers +// status into an actual container status type +func StringToContainerStatus(status string) (ContainerStatus, error) { + switch status { + case ContainerStateUnknown.String(): + return ContainerStateUnknown, nil + case ContainerStateConfigured.String(): + return ContainerStateConfigured, nil + case ContainerStateCreated.String(): + return ContainerStateCreated, nil + case ContainerStateRunning.String(): + return ContainerStateRunning, nil + case ContainerStateStopped.String(): + return ContainerStateStopped, nil + case ContainerStatePaused.String(): + return ContainerStatePaused, nil + case ContainerStateExited.String(): + return ContainerStateExited, nil + case ContainerStateRemoving.String(): + return ContainerStateRemoving, nil + default: + return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status) + } +} + +// ContainerExecStatus is the status of an exec session within a container. +type ContainerExecStatus int + +const ( + // ExecStateUnknown indicates that the state of the exec session is not + // known. + ExecStateUnknown ContainerExecStatus = iota + // ExecStateCreated indicates that the exec session has been created but + // not yet started + ExecStateCreated ContainerExecStatus = iota + // ExecStateRunning indicates that the exec session has been started but + // has not yet exited. + ExecStateRunning ContainerExecStatus = iota + // ExecStateStopped indicates that the exec session has stopped and is + // no longer running. + ExecStateStopped ContainerExecStatus = iota +) + +// String returns a string representation of a given exec state. 
+
+// String returns a string representation of a given exec state.
+func (s ContainerExecStatus) String() string {
+ switch s {
+ case ExecStateUnknown:
+ return "unknown"
+ case ExecStateCreated:
+ return "created"
+ case ExecStateRunning:
+ return "running"
+ case ExecStateStopped:
+ return "stopped"
+ default:
+ return "bad state"
+ }
+}
+
+// ContainerStats contains the statistics information for a running container
+type ContainerStats struct {
+ AvgCPU float64
+ ContainerID string
+ Name string
+ PerCPU []uint64
+ CPU float64
+ CPUNano uint64
+ CPUSystemNano uint64
+ SystemNano uint64
+ MemUsage uint64
+ MemLimit uint64
+ MemPerc float64
+ NetInput uint64
+ NetOutput uint64
+ BlockInput uint64
+ BlockOutput uint64
+ PIDs uint64
+ UpTime time.Duration
+ Duration uint64
+}
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/diff.go b/vendor/github.com/containers/podman/v4/libpod/define/diff.go
new file mode 100644
index 00000000000..ee492eb3a57
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/diff.go
@@ -0,0 +1,26 @@
+package define
+
+// DiffType is an extra type to use as an enum
+type DiffType uint8
+
+const (
+ // only diff containers
+ DiffContainer DiffType = 1 << iota
+ // only diff images
+ DiffImage
+ // diff both containers and images
+ DiffAll DiffType = 0b11111111
+)
+
+func (d DiffType) String() string {
+ switch d {
+ case DiffAll:
+ return "all"
+ case DiffContainer:
+ return "container"
+ case DiffImage:
+ return "image"
+ default:
+ return "unknown"
+ }
+}
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/errors.go b/vendor/github.com/containers/podman/v4/libpod/define/errors.go
new file mode 100644
index 00000000000..f5a7c73e5dd
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/errors.go
@@ -0,0 +1,206 @@
+package define
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/containers/common/libnetwork/types"
+)
+
+var (
+ // ErrNoSuchCtr indicates the requested container does not exist
+ ErrNoSuchCtr = errors.New("no such container")
+
+ // ErrNoSuchPod indicates the requested pod does not exist
+ ErrNoSuchPod = errors.New("no such pod")
+
+ // ErrNoSuchVolume indicates the requested volume does not exist
+ ErrNoSuchVolume = errors.New("no such volume")
+
+ // ErrNoSuchNetwork indicates the requested network does not exist
+ ErrNoSuchNetwork = types.ErrNoSuchNetwork
+
+ // ErrNoSuchExecSession indicates that the requested exec session does
+ // not exist.
+ ErrNoSuchExecSession = errors.New("no such exec session")
+
+ // ErrDepExists indicates that the current object has dependencies and
+ // cannot be removed before them.
+ ErrDepExists = errors.New("dependency exists")
+
+ // ErrNoAliases indicates that the container does not have any network
+ // aliases.
+ ErrNoAliases = errors.New("no aliases for container")
+
+ // ErrMissingPlugin indicates that the requested operation requires a
+ // plugin that is not present on the system or in the configuration.
+ ErrMissingPlugin = errors.New("required plugin missing")
+
+ // ErrCtrExists indicates a container with the same name or ID already
+ // exists
+ ErrCtrExists = errors.New("container already exists")
+ // ErrPodExists indicates a pod with the same name or ID already exists
+ ErrPodExists = errors.New("pod already exists")
+ // ErrImageExists indicates an image with the same ID already exists
+ ErrImageExists = errors.New("image already exists")
+ // ErrVolumeExists indicates a volume with the same name already exists
+ ErrVolumeExists = errors.New("volume already exists")
+ // ErrExecSessionExists indicates an exec session with the same ID
+ // already exists.
+ ErrExecSessionExists = errors.New("exec session already exists")
+ // ErrNetworkExists indicates that a network with the given name already
+ // exists.
+ ErrNetworkExists = types.ErrNetworkExists
+
+ // ErrCtrStateInvalid indicates a container is in an improper state for
+ // the requested operation
+ ErrCtrStateInvalid = errors.New("container state improper")
+ // ErrExecSessionStateInvalid indicates that an exec session is in an
+ // improper state for the requested operation
+ ErrExecSessionStateInvalid = errors.New("exec session state improper")
+ // ErrVolumeBeingUsed indicates that a volume is being used by at least one container
+ ErrVolumeBeingUsed = errors.New("volume is being used")
+
+ // ErrRuntimeFinalized indicates that the runtime has already been
+ // created and cannot be modified
+ ErrRuntimeFinalized = errors.New("runtime has been finalized")
+ // ErrCtrFinalized indicates that the container has already been created
+ // and cannot be modified
+ ErrCtrFinalized = errors.New("container has been finalized")
+ // ErrPodFinalized indicates that the pod has already been created and
+ // cannot be modified
+ ErrPodFinalized = errors.New("pod has been finalized")
+ // ErrVolumeFinalized indicates that the volume has already been created and
+ // cannot be modified
+ ErrVolumeFinalized = errors.New("volume has been finalized")
+
+ // ErrInvalidArg indicates that an invalid argument was passed
+ ErrInvalidArg = types.ErrInvalidArg
+ // ErrEmptyID indicates that an empty ID was passed
+ ErrEmptyID = errors.New("name or ID cannot be empty")
+
+ // ErrInternal indicates an internal library error
+ ErrInternal = errors.New("internal libpod error")
+
+ // ErrPodPartialFail indicates that a pod operation was only partially
+ // successful, and some containers within the pod failed.
+ ErrPodPartialFail = errors.New("some containers failed")
+
+ // ErrDetach indicates that an attach session was manually detached by
+ // the user.
+ ErrDetach = errors.New("detached from container")
+
+ // ErrWillDeadlock indicates that the requested operation will cause a
+ // deadlock. This is usually caused by upgrade issues, and is resolved
+ // by renumbering the locks.
+ ErrWillDeadlock = errors.New("deadlock due to lock mismatch")
+
+ // ErrNoCgroups indicates that the container does not have its own
+ // Cgroup.
+ ErrNoCgroups = errors.New("this container does not have a cgroup")
+ // ErrNoLogs indicates that this container is not creating a log so log
+ // operations cannot be performed on it
+ ErrNoLogs = errors.New("this container is not logging output")
+
+ // ErrRootless indicates that the given command cannot be run without
+ // root privileges.
+ ErrRootless = errors.New("operation requires root privileges") + + // ErrRuntimeStopped indicates that the runtime has already been shut + // down and no further operations can be performed on it + ErrRuntimeStopped = errors.New("runtime has already been stopped") + // ErrCtrStopped indicates that the requested container is not running + // and the requested operation cannot be performed until it is started + ErrCtrStopped = errors.New("container is stopped") + + // ErrCtrRemoved indicates that the container has already been removed + // and no further operations can be performed on it + ErrCtrRemoved = errors.New("container has already been removed") + // ErrPodRemoved indicates that the pod has already been removed and no + // further operations can be performed on it + ErrPodRemoved = errors.New("pod has already been removed") + // ErrVolumeRemoved indicates that the volume has already been removed and + // no further operations can be performed on it + ErrVolumeRemoved = errors.New("volume has already been removed") + // ErrExecSessionRemoved indicates that the exec session has already + // been removed and no further operations can be performed on it. + ErrExecSessionRemoved = errors.New("exec session has already been removed") + + // ErrDBClosed indicates that the connection to the state database has + // already been closed + ErrDBClosed = errors.New("database connection already closed") + // ErrDBBadConfig indicates that the database has a different schema or + // was created by a libpod with a different config + ErrDBBadConfig = errors.New("database configuration mismatch") + + // ErrNSMismatch indicates that the requested pod or container is in a + // different namespace and cannot be accessed or modified. + ErrNSMismatch = errors.New("target is in a different namespace") + + // ErrNotImplemented indicates that the requested functionality is not + // yet present + ErrNotImplemented = errors.New("not yet implemented") + + // ErrOSNotSupported indicates the function is not available on the particular + // OS. + ErrOSNotSupported = errors.New("no support for this OS yet") + + // ErrOCIRuntime indicates a generic error from the OCI runtime + ErrOCIRuntime = errors.New("OCI runtime error") + + // ErrOCIRuntimePermissionDenied indicates the OCI runtime attempted to invoke a command that returned + // a permission denied error + ErrOCIRuntimePermissionDenied = errors.New("OCI permission denied") + + // ErrOCIRuntimeNotFound indicates the OCI runtime attempted to invoke a command + // that was not found + ErrOCIRuntimeNotFound = errors.New("OCI runtime attempted to invoke a command that was not found") + + // ErrOCIRuntimeUnavailable indicates that the OCI runtime associated to a container + // could not be found in the configuration + ErrOCIRuntimeUnavailable = errors.New("OCI unavailable") + + // ErrConmonOutdated indicates the version of conmon found (whether via the configuration or $PATH) + // is out of date for the current podman version + ErrConmonOutdated = errors.New("outdated conmon version") + // ErrConmonDead indicates that the container's conmon process has been + // killed, preventing normal operation. + ErrConmonDead = errors.New("conmon process killed") + + // ErrNetworkOnPodContainer indicates the user wishes to alter network attributes on a container + // in a pod. 
+ // This cannot be done as the infra container has all the network information
+ ErrNetworkOnPodContainer = errors.New("network cannot be configured when it is shared with a pod")
+
+ // ErrNetworkInUse indicates the requested operation failed because the network was in use
+ ErrNetworkInUse = errors.New("network is being used")
+
+ // ErrStoreNotInitialized indicates that the container storage was never
+ // initialized.
+ ErrStoreNotInitialized = errors.New("the container storage was never initialized")
+
+ // ErrNoNetwork indicates that a container has no net namespace, like network=none
+ ErrNoNetwork = errors.New("container has no network namespace")
+
+ // ErrNetworkModeInvalid indicates that a container has the wrong network mode for an operation
+ ErrNetworkModeInvalid = errors.New("invalid network mode")
+
+ // ErrSetSecurityAttribute indicates that a request to set a container's security attribute
+ // was not possible.
+ ErrSetSecurityAttribute = fmt.Errorf("%w: unable to assign security attribute", ErrOCIRuntime)
+
+ // ErrGetSecurityAttribute indicates that a request to get a container's security attribute
+ // was not possible.
+ ErrGetSecurityAttribute = fmt.Errorf("%w: unable to get security attribute", ErrOCIRuntime)
+
+ // ErrSecurityAttribute indicates that an error occurred while processing
+ // security attributes for the container
+ ErrSecurityAttribute = fmt.Errorf("%w: unable to process security attribute", ErrOCIRuntime)
+
+ // ErrCanceled indicates that an operation has been cancelled by a user.
+ // Useful for potentially long running tasks.
+ ErrCanceled = errors.New("cancelled by user")
+
+ // ErrConmonVersionFormat is used when the expected version format of conmon
+ // has changed.
+ ErrConmonVersionFormat = "conmon version changed format"
+)
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/exec_codes.go b/vendor/github.com/containers/podman/v4/libpod/define/exec_codes.go
new file mode 100644
index 00000000000..f94616b3327
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/exec_codes.go
@@ -0,0 +1,50 @@
+package define
+
+import (
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // ExecErrorCodeGeneric is the default error code to return from an exec session if libpod failed
+ // prior to calling the runtime
+ ExecErrorCodeGeneric = 125
+ // ExecErrorCodeCannotInvoke is the error code to return when the runtime fails to invoke a command
+ // an example of this can be found by trying to execute a directory:
+ // `podman exec -l /etc`
+ ExecErrorCodeCannotInvoke = 126
+ // ExecErrorCodeNotFound is the error code to return when a command cannot be found
+ ExecErrorCodeNotFound = 127
+)
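[Reviewer note: these codes mirror the shell's 125/126/127 exit-code conventions. A hedged sketch of the two helpers defined just below, using the sentinel errors from errors.go in this vendored package:]

```go
package main

import (
	"fmt"

	"github.com/containers/podman/v4/libpod/define"
	"github.com/pkg/errors"
)

func main() {
	// A wrapped "command not found" OCI runtime error overrides the
	// original exit code with 127, matching shell conventions.
	err := errors.Wrap(define.ErrOCIRuntimeNotFound, "exec failed")
	fmt.Println(define.TranslateExecErrorToExitCode(define.ExecErrorCodeGeneric, err)) // 127

	// Free-form error text is classified by substring matching instead.
	fmt.Println(define.ExitCode(errors.New("no such file"))) // 127
	fmt.Println(define.ExitCode(nil))                        // 0
}
```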
+
+// TranslateExecErrorToExitCode takes an error and checks whether it
+// has a predefined exit code associated. If so, it returns that; otherwise
+// it returns the exit code originally stated in libpod.Exec()
+func TranslateExecErrorToExitCode(originalEC int, err error) int {
+ if errors.Cause(err) == ErrOCIRuntimePermissionDenied {
+ return ExecErrorCodeCannotInvoke
+ }
+ if errors.Cause(err) == ErrOCIRuntimeNotFound {
+ return ExecErrorCodeNotFound
+ }
+ return originalEC
+}
+
+// ExitCode reads the error message when failing to execute the container process
+// and then returns 0 if no error, ExecErrorCodeNotFound if command does not exist, or ExecErrorCodeCannotInvoke for
+// all other errors
+func ExitCode(err error) int {
+ if err == nil {
+ return 0
+ }
+ e := strings.ToLower(err.Error())
+ logrus.Debugf("ExitCode msg: %q", e)
+ if strings.Contains(e, "not found") ||
+ strings.Contains(e, "no such file") {
+ return ExecErrorCodeNotFound
+ }
+
+ return ExecErrorCodeCannotInvoke
+}
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/fileinfo.go b/vendor/github.com/containers/podman/v4/libpod/define/fileinfo.go
new file mode 100644
index 00000000000..eec99e30059
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/fileinfo.go
@@ -0,0 +1,16 @@
+package define
+
+import (
+ "os"
+ "time"
+)
+
+// FileInfo describes the attributes of a file or directory.
+type FileInfo struct {
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ Mode os.FileMode `json:"mode"`
+ ModTime time.Time `json:"mtime"`
+ IsDir bool `json:"isDir"`
+ LinkTarget string `json:"linkTarget"`
+}
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/healthchecks.go b/vendor/github.com/containers/podman/v4/libpod/define/healthchecks.go
new file mode 100644
index 00000000000..bde449d3077
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/healthchecks.go
@@ -0,0 +1,49 @@
+package define
+
+const (
+ // HealthCheckHealthy describes a healthy container
+ HealthCheckHealthy string = "healthy"
+ // HealthCheckUnhealthy describes an unhealthy container
+ HealthCheckUnhealthy string = "unhealthy"
+ // HealthCheckStarting describes the time between when the container starts
+ // and the start-period (time allowed for the container to start and application
+ // to be running) expires.
+ HealthCheckStarting string = "starting"
+)
+
+// HealthCheckStatus represents the current state of a container's health check
+type HealthCheckStatus int
+
+const (
+ // HealthCheckSuccess means the health worked
+ HealthCheckSuccess HealthCheckStatus = iota
+ // HealthCheckFailure means the health ran and failed
+ HealthCheckFailure HealthCheckStatus = iota
+ // HealthCheckContainerStopped means the health check cannot
+ // be run because the container is stopped
+ HealthCheckContainerStopped HealthCheckStatus = iota
+ // HealthCheckContainerNotFound means the container could
+ // not be found in local store
+ HealthCheckContainerNotFound HealthCheckStatus = iota
+ // HealthCheckNotDefined means the container has no health
+ // check defined in it
+ HealthCheckNotDefined HealthCheckStatus = iota
+ // HealthCheckInternalError means something failed while obtaining or running
+ // a given health check
+ HealthCheckInternalError HealthCheckStatus = iota
+ // HealthCheckDefined means the healthcheck was found on the container
+ HealthCheckDefined HealthCheckStatus = iota
+)
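[Reviewer note: the internal HealthCheckStatus values and the user-facing strings above are separate vocabularies. A hypothetical helper, not present in the vendored file, showing one way a caller might map between them:]

```go
package define

// healthStatusString is an illustrative sketch only: it maps internal
// HealthCheckStatus values onto the user-facing strings defined at the
// top of this file. Statuses with no Docker equivalent fall through.
func healthStatusString(s HealthCheckStatus) string {
	switch s {
	case HealthCheckSuccess:
		return HealthCheckHealthy
	case HealthCheckFailure:
		return HealthCheckUnhealthy
	default:
		// Stopped, NotFound, NotDefined, and InternalError are usually
		// surfaced to callers as errors rather than as a health string.
		return "unknown"
	}
}
```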
+
+// Healthcheck defaults. These are used both in the CLI as well as in
+// libpod and were moved from cmd/podman/common
+const (
+ // DefaultHealthCheckInterval default value
+ DefaultHealthCheckInterval = "30s"
+ // DefaultHealthCheckRetries default value
+ DefaultHealthCheckRetries uint = 3
+ // DefaultHealthCheckStartPeriod default value
+ DefaultHealthCheckStartPeriod = "0s"
+ // DefaultHealthCheckTimeout default value
+ DefaultHealthCheckTimeout = "30s"
+)
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/info.go b/vendor/github.com/containers/podman/v4/libpod/define/info.go
new file mode 100644
index 00000000000..911fa5c03c7
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/info.go
@@ -0,0 +1,152 @@
+package define
+
+import (
+ "github.com/containers/storage/pkg/idtools"
+)
+
+// Info is the overall struct that describes the host system
+// running libpod/podman
+type Info struct {
+ Host *HostInfo `json:"host"`
+ Store *StoreInfo `json:"store"`
+ Registries map[string]interface{} `json:"registries"`
+ Plugins Plugins `json:"plugins"`
+ Version Version `json:"version"`
+}
+
+// SecurityInfo describes the security features of the libpod host
+type SecurityInfo struct {
+ AppArmorEnabled bool `json:"apparmorEnabled"`
+ DefaultCapabilities string `json:"capabilities"`
+ Rootless bool `json:"rootless"`
+ SECCOMPEnabled bool `json:"seccompEnabled"`
+ SECCOMPProfilePath string `json:"seccompProfilePath"`
+ SELinuxEnabled bool `json:"selinuxEnabled"`
+}
+
+// HostInfo describes the libpod host
+type HostInfo struct {
+ Arch string `json:"arch"`
+ BuildahVersion string `json:"buildahVersion"`
+ CgroupManager string `json:"cgroupManager"`
+ CgroupsVersion string `json:"cgroupVersion"`
+ CgroupControllers []string `json:"cgroupControllers"`
+ Conmon *ConmonInfo `json:"conmon"`
+ CPUs int `json:"cpus"`
+ CPUUtilization *CPUUsage `json:"cpuUtilization"`
+ Distribution DistributionInfo `json:"distribution"`
+ EventLogger string `json:"eventLogger"`
+ Hostname string `json:"hostname"`
+ IDMappings IDMappings `json:"idMappings,omitempty"`
+ Kernel string `json:"kernel"`
+ LogDriver string `json:"logDriver"`
+ MemFree int64 `json:"memFree"`
+ MemTotal int64 `json:"memTotal"`
+ NetworkBackend string `json:"networkBackend"`
+ OCIRuntime *OCIRuntimeInfo `json:"ociRuntime"`
+ OS string `json:"os"`
+ // RemoteSocket returns the UNIX domain socket the Podman service is listening on
+ RemoteSocket *RemoteSocket `json:"remoteSocket,omitempty"`
+ RuntimeInfo map[string]interface{} `json:"runtimeInfo,omitempty"`
+ // ServiceIsRemote is true when the podman/libpod service is remote to the client
+ ServiceIsRemote bool `json:"serviceIsRemote"`
+ Security SecurityInfo `json:"security"`
+ Slirp4NetNS SlirpInfo `json:"slirp4netns,omitempty"`
+ SwapFree int64 `json:"swapFree"`
+ SwapTotal int64 `json:"swapTotal"`
+ Uptime string `json:"uptime"`
+ Linkmode string `json:"linkmode"`
+}
+
+// RemoteSocket describes information about the API socket
+type RemoteSocket struct {
+ Path string `json:"path,omitempty"`
+ Exists bool `json:"exists,omitempty"`
+}
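[Reviewer note: define.Info is the document behind `podman info`. A minimal sketch, assuming a `podman` binary on $PATH, of decoding its JSON form into the types above:]

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"

	"github.com/containers/podman/v4/libpod/define"
)

func main() {
	// `podman info --format json` emits a single define.Info document.
	out, err := exec.Command("podman", "info", "--format", "json").Output()
	if err != nil {
		panic(err)
	}
	var info define.Info
	if err := json.Unmarshal(out, &info); err != nil {
		panic(err)
	}
	fmt.Printf("host: %s/%s rootless=%v graphroot=%s\n",
		info.Host.OS, info.Host.Arch,
		info.Host.Security.Rootless, info.Store.GraphRoot)
}
```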
+
+// SlirpInfo describes the slirp executable that
+// is being used.
+type SlirpInfo struct {
+ Executable string `json:"executable"`
+ Package string `json:"package"`
+ Version string `json:"version"`
+}
+
+// IDMappings describe the GID and UID mappings
+type IDMappings struct {
+ GIDMap []idtools.IDMap `json:"gidmap"`
+ UIDMap []idtools.IDMap `json:"uidmap"`
+}
+
+// DistributionInfo describes the host distribution
+// for libpod
+type DistributionInfo struct {
+ Distribution string `json:"distribution"`
+ Variant string `json:"variant,omitempty"`
+ Version string `json:"version"`
+ Codename string `json:"codename,omitempty"`
+}
+
+// ConmonInfo describes the conmon executable being used
+type ConmonInfo struct {
+ Package string `json:"package"`
+ Path string `json:"path"`
+ Version string `json:"version"`
+}
+
+// OCIRuntimeInfo describes the runtime (crun or runc) being
+// used with podman
+type OCIRuntimeInfo struct {
+ Name string `json:"name"`
+ Package string `json:"package"`
+ Path string `json:"path"`
+ Version string `json:"version"`
+}
+
+// StoreInfo describes the container storage and its
+// attributes
+type StoreInfo struct {
+ ConfigFile string `json:"configFile"`
+ ContainerStore ContainerStore `json:"containerStore"`
+ GraphDriverName string `json:"graphDriverName"`
+ GraphOptions map[string]interface{} `json:"graphOptions"`
+ GraphRoot string `json:"graphRoot"`
+ // GraphRootAllocated is how much space the graphroot has in bytes
+ GraphRootAllocated uint64 `json:"graphRootAllocated"`
+ // GraphRootUsed is how much of graphroot is used in bytes
+ GraphRootUsed uint64 `json:"graphRootUsed"`
+ GraphStatus map[string]string `json:"graphStatus"`
+ ImageCopyTmpDir string `json:"imageCopyTmpDir"`
+ ImageStore ImageStore `json:"imageStore"`
+ RunRoot string `json:"runRoot"`
+ VolumePath string `json:"volumePath"`
+}
+
+// ImageStore describes the image store. Right now it reports only the
+// number of images present
+type ImageStore struct {
+ Number int `json:"number"`
+}
+
+// ContainerStore describes the quantity of containers in the
+// store by status
+type ContainerStore struct {
+ Number int `json:"number"`
+ Paused int `json:"paused"`
+ Running int `json:"running"`
+ Stopped int `json:"stopped"`
+}
+
+type Plugins struct {
+ Volume []string `json:"volume"`
+ Network []string `json:"network"`
+ Log []string `json:"log"`
+ // FIXME what should we do with Authorization, docker seems to return nothing by default
+ // Authorization []string `json:"authorization"`
+}
+
+type CPUUsage struct {
+ UserPercent float64 `json:"userPercent"`
+ SystemPercent float64 `json:"systemPercent"`
+ IdlePercent float64 `json:"idlePercent"`
+}
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/mount.go b/vendor/github.com/containers/podman/v4/libpod/define/mount.go
new file mode 100644
index 00000000000..1b0d019c831
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/mount.go
@@ -0,0 +1,12 @@
+package define
+
+const (
+ // TypeBind is the type for mounting host dir
+ TypeBind = "bind"
+ // TypeVolume is the type for named volumes
+ TypeVolume = "volume"
+ // TypeTmpfs is the type for mounting tmpfs
+ TypeTmpfs = "tmpfs"
+ // TypeDevpts is the type for creating a devpts
+ TypeDevpts = "devpts"
+)
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/pod_inspect.go b/vendor/github.com/containers/podman/v4/libpod/define/pod_inspect.go
new file mode 100644
index 00000000000..c5ea3a3c086
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/pod_inspect.go
@@ -0,0 +1,131 @@
+package define
+
+import (
+ "net"
+ "time"
+)
+
+// InspectPodData contains detailed information on a pod's configuration and
+// state. It is used as the output of Inspect on pods.
+type InspectPodData struct {
+ // ID is the ID of the pod.
+ ID string `json:"Id"`
+ // Name is the name of the pod.
+ Name string
+ // Namespace is the Libpod namespace the pod is placed in.
+ Namespace string `json:"Namespace,omitempty"`
+ // Created is the time when the pod was created.
+ Created time.Time
+ // CreateCommand is the full command plus arguments of the process the
+ // pod has been created with.
+ CreateCommand []string `json:"CreateCommand,omitempty"`
+ // State represents the current state of the pod.
+ State string `json:"State"`
+ // Hostname is the hostname that the pod will set.
+ Hostname string
+ // Labels is a set of key-value labels that have been applied to the
+ // pod.
+ Labels map[string]string `json:"Labels,omitempty"`
+ // CreateCgroup is whether this pod will create its own Cgroup to group
+ // containers under.
+ CreateCgroup bool
+ // CgroupParent is the parent of the pod's Cgroup.
+ CgroupParent string `json:"CgroupParent,omitempty"`
+ // CgroupPath is the path to the pod's Cgroup.
+ CgroupPath string `json:"CgroupPath,omitempty"`
+ // CreateInfra is whether this pod will create an infra container to
+ // share namespaces.
+ CreateInfra bool
+ // InfraContainerID is the ID of the pod's infra container, if one is
+ // present.
+ InfraContainerID string `json:"InfraContainerID,omitempty"`
+ // InfraConfig is the configuration of the infra container of the pod.
+ // Will only be set if CreateInfra is true.
+ InfraConfig *InspectPodInfraConfig `json:"InfraConfig,omitempty"`
+ // SharedNamespaces contains a list of namespaces that will be shared by
+ // containers within the pod. Can only be set if CreateInfra is true.
+ SharedNamespaces []string `json:"SharedNamespaces,omitempty"`
+ // NumContainers is the number of containers in the pod, including the
+ // infra container.
+ NumContainers uint
+ // Containers gives a brief summary of all containers in the pod and
+ // their current status.
+ Containers []InspectPodContainerInfo `json:"Containers,omitempty"`
+ // CPUPeriod contains the CPU period of the pod
+ CPUPeriod uint64 `json:"cpu_period,omitempty"`
+ // CPUQuota contains the CPU quota of the pod
+ CPUQuota int64 `json:"cpu_quota,omitempty"`
+ // CPUSetCPUs contains linux specific CPU data for the pod
+ CPUSetCPUs string `json:"cpuset_cpus,omitempty"`
+ // Mounts contains volume related information for the pod
+ Mounts []InspectMount `json:"mounts,omitempty"`
+ // Devices contains the specified host devices
+ Devices []InspectDevice `json:"devices,omitempty"`
+ // BlkioDeviceReadBps contains the Read/Access limit for the pod's devices
+ BlkioDeviceReadBps []InspectBlkioThrottleDevice `json:"device_read_bps,omitempty"`
+ // VolumesFrom contains the containers that the pod inherits mounts from
+ VolumesFrom []string `json:"volumes_from,omitempty"`
+ // SecurityOpt contains the specified security labels and related SELinux information
+ SecurityOpts []string `json:"security_opt,omitempty"`
+}
+
+// InspectPodInfraConfig contains the configuration of the pod's infra
+// container.
+type InspectPodInfraConfig struct {
+ // PortBindings are ports that will be forwarded to the infra container
+ // and then shared with the pod.
+ PortBindings map[string][]InspectHostPort
+ // HostNetwork is whether the infra container (and thus the whole pod)
+ // will use the host's network and not create a network namespace.
+ HostNetwork bool
+ // StaticIP is a static IPv4 that will be assigned to the infra
+ // container and then used by the pod.
+ // swagger:strfmt ipv4
+ StaticIP net.IP
+ // StaticMAC is a static MAC address that will be assigned to the infra
+ // container and then used by the pod.
+ StaticMAC string
+ // NoManageResolvConf indicates that the pod will not manage resolv.conf
+ // and instead each container will handle its own.
+ NoManageResolvConf bool
+ // DNSServer is a set of DNS Servers that will be used by the infra
+ // container's resolv.conf and shared with the remainder of the pod.
+ DNSServer []string
+ // DNSSearch is a set of DNS search domains that will be used by the
+ // infra container's resolv.conf and shared with the remainder of the
+ // pod.
+ DNSSearch []string
+ // DNSOption is a set of DNS options that will be used by the infra
+ // container's resolv.conf and shared with the remainder of the pod.
+ DNSOption []string
+ // NoManageHosts indicates that the pod will not manage /etc/hosts and
+ // instead each container will handle its own.
+ NoManageHosts bool
+ // HostAdd adds a number of hosts to the infra container's resolv.conf
+ // which will be shared with the rest of the pod.
+ HostAdd []string
+ // Networks is a list of CNI networks the pod will join.
+ Networks []string
+ // NetworkOptions are additional options for each network
+ NetworkOptions map[string][]string
+ // CPUPeriod contains the CPU period of the pod
+ CPUPeriod uint64 `json:"cpu_period,omitempty"`
+ // CPUQuota contains the CPU quota of the pod
+ CPUQuota int64 `json:"cpu_quota,omitempty"`
+ // CPUSetCPUs contains linux specific CPU data for the container
+ CPUSetCPUs string `json:"cpuset_cpus,omitempty"`
+ // PidNS is the PID namespace mode of the pod's infra container
+ PidNS string `json:"pid_ns,omitempty"`
+ // UserNS is the user namespace that all the containers in the pod will join.
+ UserNS string `json:"userns,omitempty"`
+}
+
+// InspectPodContainerInfo contains information on a container in a pod.
+type InspectPodContainerInfo struct {
+ // ID is the ID of the container.
+ ID string `json:"Id"`
+ // Name is the name of the container.
+ Name string
+ // State is the current status of the container.
+ State string
+}
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/podstate.go b/vendor/github.com/containers/podman/v4/libpod/define/podstate.go
new file mode 100644
index 00000000000..e0267197271
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/podstate.go
@@ -0,0 +1,22 @@
+package define
+
+const (
+ // PodStateCreated indicates the pod is created but has not been started
+ PodStateCreated = "Created"
+ // PodStateErrored indicates the pod is in an errored state where
+ // information about it can no longer be retrieved
+ PodStateErrored = "Error"
+ // PodStateExited indicates the pod ran but has been stopped
+ PodStateExited = "Exited"
+ // PodStatePaused indicates the pod has been paused
+ PodStatePaused = "Paused"
+ // PodStateRunning indicates that all of the containers in the pod are
+ // running.
+ PodStateRunning = "Running"
+ // PodStateDegraded indicates that at least one, but not all, of the
+ // containers in the pod are running.
+ PodStateDegraded = "Degraded"
+ // PodStateStopped indicates all of the containers belonging to the pod
+ // are stopped.
+ PodStateStopped = "Stopped"
+)
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/runtime.go b/vendor/github.com/containers/podman/v4/libpod/define/runtime.go
new file mode 100644
index 00000000000..1539e19ee2f
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/runtime.go
@@ -0,0 +1,28 @@
+package define
+
+import "time"
+
+// RuntimeStateStore is a constant indicating which state store implementation
+// should be used by libpod
+type RuntimeStateStore int
+
+const (
+ // InvalidStateStore is an invalid state store
+ InvalidStateStore RuntimeStateStore = iota
+ // InMemoryStateStore is an in-memory state that will not persist data
+ // on containers and pods between libpod instances or after system
+ // reboot
+ InMemoryStateStore RuntimeStateStore = iota
+ // SQLiteStateStore is a state backed by a SQLite database
+ // It is presently disabled
+ SQLiteStateStore RuntimeStateStore = iota
+ // BoltDBStateStore is a state backed by a BoltDB database
+ BoltDBStateStore RuntimeStateStore = iota
+ // ContainerCreateTimeout is the timeout before we decide we've failed
+ // to create a container.
+ // TODO: Make this generic - all OCI runtime operations should use the
+ // same timeout, this one.
+ // TODO: Consider dropping from 240 to 60 seconds. I don't think waiting
+ // 4 minutes versus 1 minute makes a real difference.
+ ContainerCreateTimeout = 240 * time.Second
+)
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/terminal.go b/vendor/github.com/containers/podman/v4/libpod/define/terminal.go
new file mode 100644
index 00000000000..ce895554491
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/terminal.go
@@ -0,0 +1,7 @@
+package define
+
+// TerminalSize represents the width and height of a terminal.
+type TerminalSize struct {
+ Width uint16
+ Height uint16
+}
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/version.go b/vendor/github.com/containers/podman/v4/libpod/define/version.go
new file mode 100644
index 00000000000..2c17e6e92a9
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/version.go
@@ -0,0 +1,55 @@
+package define
+
+import (
+ "runtime"
+ "strconv"
+ "time"
+
+ "github.com/containers/podman/v4/version"
+)
+
+// Overwritten at build time
+var (
+ // gitCommit is the commit that the binary is being built from.
+ // It will be populated by the Makefile.
+ gitCommit string
+ // buildInfo is the time at which the binary was built
+ // It will be populated by the Makefile.
+ buildInfo string
+)
+
+// Version is an output struct for API
+type Version struct {
+ APIVersion string
+ Version string
+ GoVersion string
+ GitCommit string
+ BuiltTime string
+ Built int64
+ OsArch string
+ Os string
+}
+
+// GetVersion returns a Version struct for the API and podman
+func GetVersion() (Version, error) {
+ var err error
+ var buildTime int64
+ if buildInfo != "" {
+ // Converts unix time from string to int64
+ buildTime, err = strconv.ParseInt(buildInfo, 10, 64)
+
+ if err != nil {
+ return Version{}, err
+ }
+ }
+ return Version{
+ APIVersion: version.APIVersion[version.Libpod][version.CurrentAPI].String(),
+ Version: version.Version.String(),
+ GoVersion: runtime.Version(),
+ GitCommit: gitCommit,
+ BuiltTime: time.Unix(buildTime, 0).Format(time.ANSIC),
+ Built: buildTime,
+ OsArch: runtime.GOOS + "/" + runtime.GOARCH,
+ Os: runtime.GOOS,
+ }, nil
+}
diff --git a/vendor/github.com/containers/podman/v4/libpod/define/volume_inspect.go b/vendor/github.com/containers/podman/v4/libpod/define/volume_inspect.go
new file mode 100644
index 00000000000..fac1791763a
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/define/volume_inspect.go
@@ -0,0 +1,59 @@
+package define
+
+import (
+ "time"
+)
+
+// InspectVolumeData is the output of Inspect() on a volume. It is matched to
+// the format of 'docker volume inspect'.
+type InspectVolumeData struct {
+ // Name is the name of the volume.
+ Name string `json:"Name"`
+ // Driver is the driver used to create the volume.
+ // If set to "local" or "", the Local driver (Podman built-in code) is
+ // used to service the volume; otherwise, a volume plugin with the given
+ // name is used to mount and manage the volume.
+ Driver string `json:"Driver"`
+ // Mountpoint is the path on the host where the volume is mounted.
+ Mountpoint string `json:"Mountpoint"`
+ // CreatedAt is the date and time the volume was created. This is not
+ // stored for older Libpod volumes; if not stored, it will be omitted.
+ CreatedAt time.Time `json:"CreatedAt,omitempty"`
+ // Status is used to return information on the volume's current state,
+ // if the volume was created using a volume plugin (uses a Driver that
+ // is not the local driver).
+ // Status is provided to us by an external program, so no guarantees are
+ // made about its format or contents. Further, it is an optional field,
+ // so it may not be set even in cases where a volume plugin is in use.
+ Status map[string]interface{} `json:"Status,omitempty"`
+ // Labels includes the volume's configured labels, key:value pairs that
+ // can be passed during volume creation to provide information for third
+ // party tools.
+ Labels map[string]string `json:"Labels"`
+ // Scope is unused and provided solely for Docker compatibility. It is
+ // unconditionally set to "local".
+ Scope string `json:"Scope"`
+ // Options is a set of options that were used when creating the volume.
+ // For the Local driver, these are mount options that will be used to
+ // determine how a local filesystem is mounted; they are handled as
+ // parameters to Mount in a manner described in the volume create
+ // manpage.
+ // For non-local drivers, these are passed as-is to the volume plugin.
+ Options map[string]string `json:"Options"`
+ // UID is the UID that the volume was created with.
+ UID int `json:"UID,omitempty"`
+ // GID is the GID that the volume was created with.
+ GID int `json:"GID,omitempty"`
+ // Anonymous indicates that the volume was created as an anonymous
+ // volume for a specific container, and will be removed when any
+ // container using it is removed.
+ Anonymous bool `json:"Anonymous,omitempty"`
+ // MountCount is the number of times this volume has been mounted.
+ MountCount uint `json:"MountCount"`
+ // NeedsCopyUp indicates that the next time the volume is mounted into
+ // a container, the contents of the mount point in the container image
+ // will be copied up into the volume.
+ NeedsCopyUp bool `json:"NeedsCopyUp,omitempty"`
+ // NeedsChown indicates that the next time the volume is mounted into
+ // a container, the container will chown the volume to the container process
+ // UID/GID.
+ NeedsChown bool `json:"NeedsChown,omitempty"`
+}
diff --git a/vendor/github.com/containers/podman/v4/libpod/events/config.go b/vendor/github.com/containers/podman/v4/libpod/events/config.go
new file mode 100644
index 00000000000..00cdca00705
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/events/config.go
@@ -0,0 +1,210 @@
+package events
+
+import (
+ "context"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+// EventerType ...
+type EventerType int
+
+const (
+ // LogFile indicates the event logger will be a logfile
+ LogFile EventerType = iota
+ // Journald indicates journald should be used to log events
+ Journald EventerType = iota
+ // Null is a no-op events logger. It does not read or write events.
+ Null EventerType = iota
+ // Memory indicates the event logger will hold events in memory
+ Memory EventerType = iota
+)
+
+// Event describes the attributes of a libpod event
+type Event struct {
+ // ContainerExitCode is for storing the exit code of a container which can
+ // be used for "internal" event notification
+ ContainerExitCode int `json:",omitempty"`
+ // ID can be for the container, image, volume, etc
+ ID string `json:",omitempty"`
+ // Image used where applicable
+ Image string `json:",omitempty"`
+ // Name where applicable
+ Name string `json:",omitempty"`
+ // Network is the network name in a network event
+ Network string `json:"network,omitempty"`
+ // Status describes the event that occurred
+ Status Status
+ // Time the event occurred
+ Time time.Time
+ // Type of event that occurred
+ Type Type
+
+ Details
+}
+
+// Details describes specifics about certain events, specifically around
+// container events
+type Details struct {
+ // ID is the event ID
+ ID string
+ // Attributes can be used to describe specifics about the event
+ // in the case of a container event, labels for example
+ Attributes map[string]string
+}
+
+// EventerOptions describe options that need to be passed to create
+// an eventer
+type EventerOptions struct {
+ // EventerType describes whether to use journald, file or memory
+ EventerType string
+ // LogFilePath is the path to where the log file should reside if using
+ // the file logger
+ LogFilePath string
+ // LogFileMaxSize is the default limit used for rotating the log file
+ LogFileMaxSize uint64
+}
+
+// Eventer is the interface for journald or file event logging
+type Eventer interface {
+ // Write an event to a backend
+ Write(event Event) error
+ // Read an event from the backend
+ Read(ctx context.Context, options ReadOptions) error
+ // String returns the type of event logger
+ String() string
+}
+
+// ReadOptions describe the attributes needed to read event logs
+type ReadOptions struct {
+ // EventChannel is the channel used to pass events back to the caller
+ EventChannel chan *Event
+ // Filters are key/value pairs used to limit output
+ Filters []string
+ // FromStart means you start reading from the start of the logs
+ FromStart bool
+ // Since reads "since" the given time
+ Since string
+ // Stream indicates whether to keep streaming new events (follow mode)
+ Stream bool
+ // Until reads "until" the given time
+ Until string
+}
+
+// Type of event that occurred (container, volume, image, pod, etc)
+type Type string
+
+// Status describes the actual event action (stop, start, create, kill)
+type Status string
+
+const (
+ // Container - event is related to containers
+ Container Type = "container"
+ // Image - event is related to images
+ Image Type = "image"
+ // Network - event is related to networks
+ Network Type = "network"
+ // Pod - event is related to pods
+ Pod Type = "pod"
+ // System - event is related to Podman whole and not to any specific
+ // container/pod/image/volume
+ System Type = "system"
+ // Volume - event is related to volumes
+ Volume Type = "volume"
+ // Machine - event is related to machine VMs
+ Machine Type = "machine"
+
+ // Attach ...
+ Attach Status = "attach"
+ // AutoUpdate ...
+ AutoUpdate Status = "auto-update"
+ // Build ...
+ Build Status = "build"
+ // Checkpoint ...
+ Checkpoint Status = "checkpoint"
+ // Cleanup ...
+ Cleanup Status = "cleanup"
+ // Commit ...
+ Commit Status = "commit"
+ // Copy ...
+ Copy Status = "copy"
+ // Create ...
+ Create Status = "create"
+ // Exec ...
+ Exec Status = "exec" + // ExecDied indicates that an exec session in a container died. + ExecDied Status = "exec_died" + // Exited indicates that a container's process died + Exited Status = "died" + // Export ... + Export Status = "export" + // History ... + History Status = "history" + // Import ... + Import Status = "import" + // Init ... + Init Status = "init" + // Kill ... + Kill Status = "kill" + // LoadFromArchive ... + LoadFromArchive Status = "loadfromarchive" + // Mount ... + Mount Status = "mount" + // NetworkConnect + NetworkConnect Status = "connect" + // NetworkDisconnect + NetworkDisconnect Status = "disconnect" + // Pause ... + Pause Status = "pause" + // Prune ... + Prune Status = "prune" + // Pull ... + Pull Status = "pull" + // Push ... + Push Status = "push" + // Refresh indicates that the system refreshed the state after a + // reboot. + Refresh Status = "refresh" + // Remove ... + Remove Status = "remove" + // Rename indicates that a container was renamed + Rename Status = "rename" + // Renumber indicates that lock numbers were reallocated at user + // request. + Renumber Status = "renumber" + // Restart indicates the target was restarted via an API call. + Restart Status = "restart" + // Restore ... + Restore Status = "restore" + // Rotate indicates that the log file was rotated + Rotate Status = "log-rotation" + // Save ... + Save Status = "save" + // Start ... + Start Status = "start" + // Stop ... + Stop Status = "stop" + // Sync ... + Sync Status = "sync" + // Tag ... + Tag Status = "tag" + // Unmount ... + Unmount Status = "unmount" + // Unpause ... + Unpause Status = "unpause" + // Untag ... + Untag Status = "untag" +) + +// EventFilter for filtering events +type EventFilter func(*Event) bool + +var ( + // ErrEventTypeBlank indicates the event log found something done by podman + // but it isn't likely an event + ErrEventTypeBlank = errors.New("event type blank") + + // ErrEventNotFound indicates that the event was not found in the event log + ErrEventNotFound = errors.New("unable to find event") +) diff --git a/vendor/github.com/containers/podman/v4/libpod/events/events.go b/vendor/github.com/containers/podman/v4/libpod/events/events.go new file mode 100644 index 00000000000..e83c2efeeef --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/events/events.go @@ -0,0 +1,229 @@ +package events + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/containers/storage/pkg/stringid" + "github.com/pkg/errors" +) + +// ErrNoJournaldLogging indicates that there is no journald logging +// supported (requires libsystemd) +var ErrNoJournaldLogging = errors.New("No support for journald logging") + +// String returns a string representation of EventerType +func (et EventerType) String() string { + switch et { + case LogFile: + return "file" + case Journald: + return "journald" + case Memory: + return "memory" + case Null: + return "none" + default: + return "invalid" + } +} + +// IsValidEventer checks if the given string is a valid eventer type. +func IsValidEventer(eventer string) bool { + switch eventer { + case LogFile.String(): + return true + case Journald.String(): + return true + case Memory.String(): + return true + case Null.String(): + return true + default: + return false + } +} + +// NewEvent creates an event struct and populates with +// the given status and time. 
+func NewEvent(status Status) Event {
+	return Event{
+		Status: status,
+		Time:   time.Now(),
+	}
+}
+
+// Recycle checks if the event log has reached a limit and if so
+// renames the current log and starts a new one. The remove bool
+// indicates that the old log file should be deleted.
+func (e *Event) Recycle(path string, remove bool) error {
+	return errors.New("not implemented")
+}
+
+// ToJSONString returns the event as a json'ified string
+func (e *Event) ToJSONString() (string, error) {
+	b, err := json.Marshal(e)
+	return string(b), err
+}
+
+// ToHumanReadable returns a human-readable event as a formatted string
+func (e *Event) ToHumanReadable(truncate bool) string {
+	var humanFormat string
+	id := e.ID
+	if truncate {
+		id = stringid.TruncateID(id)
+	}
+	switch e.Type {
+	case Container, Pod:
+		humanFormat = fmt.Sprintf("%s %s %s %s (image=%s, name=%s", e.Time, e.Type, e.Status, id, e.Image, e.Name)
+		// check if the container has labels and add it to the output
+		if len(e.Attributes) > 0 {
+			for k, v := range e.Attributes {
+				humanFormat += fmt.Sprintf(", %s=%s", k, v)
+			}
+		}
+		humanFormat += ")"
+	case Network:
+		humanFormat = fmt.Sprintf("%s %s %s %s (container=%s, name=%s)", e.Time, e.Type, e.Status, id, id, e.Network)
+	case Image:
+		humanFormat = fmt.Sprintf("%s %s %s %s %s", e.Time, e.Type, e.Status, id, e.Name)
+	case System:
+		if e.Name != "" {
+			humanFormat = fmt.Sprintf("%s %s %s %s", e.Time, e.Type, e.Status, e.Name)
+		} else {
+			humanFormat = fmt.Sprintf("%s %s %s", e.Time, e.Type, e.Status)
+		}
+	case Volume, Machine:
+		humanFormat = fmt.Sprintf("%s %s %s %s", e.Time, e.Type, e.Status, e.Name)
+	}
+	return humanFormat
+}
+
+// newEventFromJSONString takes stringified JSON and converts
+// it to an event
+func newEventFromJSONString(event string) (*Event, error) {
+	e := new(Event)
+	if err := json.Unmarshal([]byte(event), e); err != nil {
+		return nil, err
+	}
+	return e, nil
+}
+
+// String converts a Type to a string
+func (t Type) String() string {
+	return string(t)
+}
+
+// String converts a status to a string
+func (s Status) String() string {
+	return string(s)
+}
+
+// StringToType converts a string to an EventType
+func StringToType(name string) (Type, error) {
+	switch name {
+	case Container.String():
+		return Container, nil
+	case Image.String():
+		return Image, nil
+	case Machine.String():
+		return Machine, nil
+	case Network.String():
+		return Network, nil
+	case Pod.String():
+		return Pod, nil
+	case System.String():
+		return System, nil
+	case Volume.String():
+		return Volume, nil
+	case "":
+		return "", ErrEventTypeBlank
+	}
+	return "", errors.Errorf("unknown event type %q", name)
+}
+
+// StringToStatus converts a string to an Event Status
+// TODO if we add more events, we might consider a go-generator to
+// create the switch statement
+func StringToStatus(name string) (Status, error) {
+	switch name {
+	case Attach.String():
+		return Attach, nil
+	case AutoUpdate.String():
+		return AutoUpdate, nil
+	case Build.String():
+		return Build, nil
+	case Checkpoint.String():
+		return Checkpoint, nil
+	case Cleanup.String():
+		return Cleanup, nil
+	case Commit.String():
+		return Commit, nil
+	case Create.String():
+		return Create, nil
+	case Exec.String():
+		return Exec, nil
+	case ExecDied.String():
+		return ExecDied, nil
+	case Exited.String():
+		return Exited, nil
+	case Export.String():
+		return Export, nil
+	case History.String():
+		return History, nil
+	case Import.String():
+		return Import, nil
+	case Init.String():
+		return Init, nil
+	case Kill.String():
+		
return Kill, nil + case LoadFromArchive.String(): + return LoadFromArchive, nil + case Mount.String(): + return Mount, nil + case NetworkConnect.String(): + return NetworkConnect, nil + case NetworkDisconnect.String(): + return NetworkDisconnect, nil + case Pause.String(): + return Pause, nil + case Prune.String(): + return Prune, nil + case Pull.String(): + return Pull, nil + case Push.String(): + return Push, nil + case Refresh.String(): + return Refresh, nil + case Remove.String(): + return Remove, nil + case Rename.String(): + return Rename, nil + case Renumber.String(): + return Renumber, nil + case Restart.String(): + return Restart, nil + case Restore.String(): + return Restore, nil + case Rotate.String(): + return Rotate, nil + case Save.String(): + return Save, nil + case Start.String(): + return Start, nil + case Stop.String(): + return Stop, nil + case Sync.String(): + return Sync, nil + case Tag.String(): + return Tag, nil + case Unmount.String(): + return Unmount, nil + case Unpause.String(): + return Unpause, nil + case Untag.String(): + return Untag, nil + } + return "", errors.Errorf("unknown event status %q", name) +} diff --git a/vendor/github.com/containers/podman/v4/libpod/events/events_linux.go b/vendor/github.com/containers/podman/v4/libpod/events/events_linux.go new file mode 100644 index 00000000000..4320f219018 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/events/events_linux.go @@ -0,0 +1,29 @@ +package events + +import ( + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// NewEventer creates an eventer based on the eventer type +func NewEventer(options EventerOptions) (Eventer, error) { + logrus.Debugf("Initializing event backend %s", options.EventerType) + switch strings.ToUpper(options.EventerType) { + case strings.ToUpper(Journald.String()): + eventer, err := newEventJournalD(options) + if err != nil { + return nil, errors.Wrapf(err, "eventer creation") + } + return eventer, nil + case strings.ToUpper(LogFile.String()): + return EventLogFile{options}, nil + case strings.ToUpper(Null.String()): + return NewNullEventer(), nil + case strings.ToUpper(Memory.String()): + return NewMemoryEventer(), nil + default: + return nil, errors.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType)) + } +} diff --git a/vendor/github.com/containers/podman/v4/libpod/events/events_unsupported.go b/vendor/github.com/containers/podman/v4/libpod/events/events_unsupported.go new file mode 100644 index 00000000000..25c17552419 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/events/events_unsupported.go @@ -0,0 +1,11 @@ +//go:build !linux +// +build !linux + +package events + +import "github.com/pkg/errors" + +// NewEventer creates an eventer based on the eventer type +func NewEventer(options EventerOptions) (Eventer, error) { + return nil, errors.New("this function is not available for your platform") +} diff --git a/vendor/github.com/containers/podman/v4/libpod/events/filters.go b/vendor/github.com/containers/podman/v4/libpod/events/filters.go new file mode 100644 index 00000000000..64c162db2f6 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/events/filters.go @@ -0,0 +1,155 @@ +package events + +import ( + "strings" + "time" + + "github.com/containers/podman/v4/pkg/util" + "github.com/pkg/errors" +) + +func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error) { + switch strings.ToUpper(filter) { + case "CONTAINER": + return func(e *Event) bool { + if e.Type 
!= Container {
+				return false
+			}
+			if e.Name == filterValue {
+				return true
+			}
+			return strings.HasPrefix(e.ID, filterValue)
+		}, nil
+	case "EVENT", "STATUS":
+		return func(e *Event) bool {
+			return string(e.Status) == filterValue
+		}, nil
+	case "IMAGE":
+		return func(e *Event) bool {
+			if e.Type != Image {
+				return false
+			}
+			if e.Name == filterValue {
+				return true
+			}
+			return strings.HasPrefix(e.ID, filterValue)
+		}, nil
+	case "POD":
+		return func(e *Event) bool {
+			if e.Type != Pod {
+				return false
+			}
+			if e.Name == filterValue {
+				return true
+			}
+			return strings.HasPrefix(e.ID, filterValue)
+		}, nil
+	case "VOLUME":
+		return func(e *Event) bool {
+			if e.Type != Volume {
+				return false
+			}
+			return strings.HasPrefix(e.ID, filterValue)
+		}, nil
+	case "TYPE":
+		return func(e *Event) bool {
+			return string(e.Type) == filterValue
+		}, nil
+
+	case "LABEL":
+		return func(e *Event) bool {
+			var found bool
+			// iterate labels and see if we match a key and value
+			for eventKey, eventValue := range e.Attributes {
+				filterValueSplit := strings.SplitN(filterValue, "=", 2)
+				// if the filter isn't right, just return false
+				if len(filterValueSplit) < 2 {
+					return false
+				}
+				if eventKey == filterValueSplit[0] && eventValue == filterValueSplit[1] {
+					found = true
+					break
+				}
+			}
+			return found
+		}, nil
+	}
+	return nil, errors.Errorf("%s is an invalid filter", filter)
+}
+
+func generateEventSinceOption(timeSince time.Time) func(e *Event) bool {
+	return func(e *Event) bool {
+		return e.Time.After(timeSince)
+	}
+}
+
+func generateEventUntilOption(timeUntil time.Time) func(e *Event) bool {
+	return func(e *Event) bool {
+		return e.Time.Before(timeUntil)
+	}
+}
+
+func parseFilter(filter string) (string, string, error) {
+	filterSplit := strings.SplitN(filter, "=", 2)
+	if len(filterSplit) != 2 {
+		return "", "", errors.Errorf("%s is an invalid filter", filter)
+	}
+	return filterSplit[0], filterSplit[1], nil
+}
+
+// applyFilters applies the EventFilter slices in sequence. Filters under the
+// same key are disjunctive while each key must match (conjunctive).
+func applyFilters(event *Event, filterMap map[string][]EventFilter) bool {
+	for _, filters := range filterMap {
+		success := false
+		for _, filter := range filters {
+			if filter(event) {
+				success = true
+				break
+			}
+		}
+		if !success {
+			return false
+		}
+	}
+	return true
+}
+
+// generateEventFilters parses the specified filters into a filter map that can
+// later be used to filter events. Keys are conjunctive, values are
+// disjunctive.
+func generateEventFilters(filters []string, since, until string) (map[string][]EventFilter, error) { + filterMap := make(map[string][]EventFilter) + for _, filter := range filters { + key, val, err := parseFilter(filter) + if err != nil { + return nil, err + } + filterFunc, err := generateEventFilter(key, val) + if err != nil { + return nil, err + } + filterSlice := filterMap[key] + filterSlice = append(filterSlice, filterFunc) + filterMap[key] = filterSlice + } + + if len(since) > 0 { + timeSince, err := util.ParseInputTime(since, true) + if err != nil { + return nil, errors.Wrapf(err, "unable to convert since time of %s", since) + } + filterFunc := generateEventSinceOption(timeSince) + filterMap["since"] = []EventFilter{filterFunc} + } + + if len(until) > 0 { + timeUntil, err := util.ParseInputTime(until, false) + if err != nil { + return nil, errors.Wrapf(err, "unable to convert until time of %s", until) + } + filterFunc := generateEventUntilOption(timeUntil) + filterMap["until"] = []EventFilter{filterFunc} + } + return filterMap, nil +} diff --git a/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go b/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go new file mode 100644 index 00000000000..866042a4c3d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go @@ -0,0 +1,229 @@ +//go:build systemd +// +build systemd + +package events + +import ( + "context" + "encoding/json" + "strconv" + "time" + + "github.com/containers/podman/v4/pkg/util" + "github.com/coreos/go-systemd/v22/journal" + "github.com/coreos/go-systemd/v22/sdjournal" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// DefaultEventerType is journald when systemd is available +const DefaultEventerType = Journald + +// EventJournalD is the journald implementation of an eventer +type EventJournalD struct { + options EventerOptions +} + +// newEventJournalD creates a new journald Eventer +func newEventJournalD(options EventerOptions) (Eventer, error) { + return EventJournalD{options}, nil +} + +// Write to journald +func (e EventJournalD) Write(ee Event) error { + m := make(map[string]string) + m["SYSLOG_IDENTIFIER"] = "podman" + m["PODMAN_EVENT"] = ee.Status.String() + m["PODMAN_TYPE"] = ee.Type.String() + m["PODMAN_TIME"] = ee.Time.Format(time.RFC3339Nano) + + // Add specialized information based on the podman type + switch ee.Type { + case Image: + m["PODMAN_NAME"] = ee.Name + m["PODMAN_ID"] = ee.ID + case Container, Pod: + m["PODMAN_IMAGE"] = ee.Image + m["PODMAN_NAME"] = ee.Name + m["PODMAN_ID"] = ee.ID + if ee.ContainerExitCode != 0 { + m["PODMAN_EXIT_CODE"] = strconv.Itoa(ee.ContainerExitCode) + } + // If we have container labels, we need to convert them to a string so they + // can be recorded with the event + if len(ee.Details.Attributes) > 0 { + b, err := json.Marshal(ee.Details.Attributes) + if err != nil { + return err + } + m["PODMAN_LABELS"] = string(b) + } + case Network: + m["PODMAN_ID"] = ee.ID + m["PODMAN_NETWORK_NAME"] = ee.Network + case Volume: + m["PODMAN_NAME"] = ee.Name + } + return journal.Send(string(ee.ToHumanReadable(false)), journal.PriInfo, m) +} + +// Read reads events from the journal and sends qualified events to the event channel +func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error { + defer close(options.EventChannel) + filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until) + if err != nil { + return errors.Wrapf(err, "failed to parse event filters") + } 
+ + var untilTime time.Time + if len(options.Until) > 0 { + untilTime, err = util.ParseInputTime(options.Until, false) + if err != nil { + return err + } + } + + j, err := sdjournal.NewJournal() + if err != nil { + return err + } + defer func() { + if err := j.Close(); err != nil { + logrus.Errorf("Unable to close journal :%v", err) + } + }() + // match only podman journal entries + podmanJournal := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"} + if err := j.AddMatch(podmanJournal.String()); err != nil { + return errors.Wrap(err, "failed to add journal filter for event log") + } + + if len(options.Since) == 0 && len(options.Until) == 0 && options.Stream { + if err := j.SeekTail(); err != nil { + return errors.Wrap(err, "failed to seek end of journal") + } + // After SeekTail calling Next moves to a random entry. + // To prevent this we have to call Previous first. + // see: https://bugs.freedesktop.org/show_bug.cgi?id=64614 + if _, err := j.Previous(); err != nil { + return errors.Wrap(err, "failed to move journal cursor to previous entry") + } + } + + // the api requires a next|prev before getting a cursor + if _, err := j.Next(); err != nil { + return errors.Wrap(err, "failed to move journal cursor to next entry") + } + + prevCursor, err := j.GetCursor() + if err != nil { + return errors.Wrap(err, "failed to get journal cursor") + } + for { + select { + case <-ctx.Done(): + // the consumer has cancelled + return nil + default: + // fallthrough + } + + if _, err := j.Next(); err != nil { + return errors.Wrap(err, "failed to move journal cursor to next entry") + } + newCursor, err := j.GetCursor() + if err != nil { + return errors.Wrap(err, "failed to get journal cursor") + } + if prevCursor == newCursor { + if !options.Stream || (len(options.Until) > 0 && time.Now().After(untilTime)) { + break + } + t := sdjournal.IndefiniteWait + if len(options.Until) > 0 { + t = time.Until(untilTime) + } + _ = j.Wait(t) + continue + } + prevCursor = newCursor + + entry, err := j.GetEntry() + if err != nil { + return errors.Wrap(err, "failed to read journal entry") + } + newEvent, err := newEventFromJournalEntry(entry) + if err != nil { + // We can't decode this event. + // Don't fail hard - that would make events unusable. + // Instead, log and continue. 
+ if errors.Cause(err) != ErrEventTypeBlank { + logrus.Errorf("Unable to decode event: %v", err) + } + continue + } + if applyFilters(newEvent, filterMap) { + options.EventChannel <- newEvent + } + } + return nil + +} + +func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { //nolint + newEvent := Event{} + eventType, err := StringToType(entry.Fields["PODMAN_TYPE"]) + if err != nil { + return nil, err + } + eventTime, err := time.Parse(time.RFC3339Nano, entry.Fields["PODMAN_TIME"]) + if err != nil { + return nil, err + } + eventStatus, err := StringToStatus(entry.Fields["PODMAN_EVENT"]) + if err != nil { + return nil, err + } + newEvent.Type = eventType + newEvent.Time = eventTime + newEvent.Status = eventStatus + newEvent.Name = entry.Fields["PODMAN_NAME"] + + switch eventType { + case Container, Pod: + newEvent.ID = entry.Fields["PODMAN_ID"] + newEvent.Image = entry.Fields["PODMAN_IMAGE"] + if code, ok := entry.Fields["PODMAN_EXIT_CODE"]; ok { + intCode, err := strconv.Atoi(code) + if err != nil { + logrus.Errorf("Parsing event exit code %s", code) + } else { + newEvent.ContainerExitCode = intCode + } + } + + // we need to check for the presence of labels recorded to a container event + if stringLabels, ok := entry.Fields["PODMAN_LABELS"]; ok && len(stringLabels) > 0 { + labels := make(map[string]string, 0) + if err := json.Unmarshal([]byte(stringLabels), &labels); err != nil { + return nil, err + } + + // if we have labels, add them to the event + if len(labels) > 0 { + newEvent.Details = Details{Attributes: labels} + } + } + case Network: + newEvent.ID = entry.Fields["PODMAN_ID"] + newEvent.Network = entry.Fields["PODMAN_NETWORK_NAME"] + case Image: + newEvent.ID = entry.Fields["PODMAN_ID"] + } + return &newEvent, nil +} + +// String returns a string representation of the logger +func (e EventJournalD) String() string { + return Journald.String() +} diff --git a/vendor/github.com/containers/podman/v4/libpod/events/journal_unsupported.go b/vendor/github.com/containers/podman/v4/libpod/events/journal_unsupported.go new file mode 100644 index 00000000000..6ed39792bca --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/events/journal_unsupported.go @@ -0,0 +1,12 @@ +//go:build !systemd +// +build !systemd + +package events + +// DefaultEventerType is logfile when systemd is not present +const DefaultEventerType = LogFile + +// newEventJournalD always returns an error if libsystemd not found +func newEventJournalD(options EventerOptions) (Eventer, error) { + return nil, ErrNoJournaldLogging +} diff --git a/vendor/github.com/containers/podman/v4/libpod/events/logfile.go b/vendor/github.com/containers/podman/v4/libpod/events/logfile.go new file mode 100644 index 00000000000..21fdd802712 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/events/logfile.go @@ -0,0 +1,261 @@ +//go:build linux +// +build linux + +package events + +import ( + "bufio" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "time" + + "github.com/containers/podman/v4/pkg/util" + "github.com/containers/storage/pkg/lockfile" + "github.com/nxadm/tail" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// EventLogFile is the structure for event writing to a logfile. It contains the eventer +// options and the event itself. Methods for reading and writing are also defined from it. 
+type EventLogFile struct { + options EventerOptions +} + +// Writes to the log file +func (e EventLogFile) Write(ee Event) error { + // We need to lock events file + lock, err := lockfile.GetLockfile(e.options.LogFilePath + ".lock") + if err != nil { + return err + } + lock.Lock() + defer lock.Unlock() + + eventJSONString, err := ee.ToJSONString() + if err != nil { + return err + } + + rotated, err := rotateLog(e.options.LogFilePath, eventJSONString, e.options.LogFileMaxSize) + if err != nil { + return fmt.Errorf("rotating log file: %w", err) + } + + if rotated { + rEvent := NewEvent(Rotate) + rEvent.Type = System + rEvent.Name = e.options.LogFilePath + rotateJSONString, err := rEvent.ToJSONString() + if err != nil { + return err + } + if err := e.writeString(rotateJSONString); err != nil { + return err + } + } + + return e.writeString(eventJSONString) +} + +func (e EventLogFile) writeString(s string) error { + f, err := os.OpenFile(e.options.LogFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0700) + if err != nil { + return err + } + if _, err := f.WriteString(s + "\n"); err != nil { + return err + } + return nil +} + +func (e EventLogFile) getTail(options ReadOptions) (*tail.Tail, error) { + reopen := true + seek := tail.SeekInfo{Offset: 0, Whence: os.SEEK_END} + if options.FromStart || !options.Stream { + seek.Whence = 0 + reopen = false + } + stream := options.Stream + return tail.TailFile(e.options.LogFilePath, tail.Config{ReOpen: reopen, Follow: stream, Location: &seek, Logger: tail.DiscardingLogger, Poll: true}) +} + +// Reads from the log file +func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error { + defer close(options.EventChannel) + filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until) + if err != nil { + return errors.Wrapf(err, "failed to parse event filters") + } + t, err := e.getTail(options) + if err != nil { + return err + } + if len(options.Until) > 0 { + untilTime, err := util.ParseInputTime(options.Until, false) + if err != nil { + return err + } + go func() { + time.Sleep(time.Until(untilTime)) + if err := t.Stop(); err != nil { + logrus.Errorf("Stopping logger: %v", err) + } + }() + } + funcDone := make(chan bool) + copy := true + go func() { + select { + case <-funcDone: + // Do nothing + case <-ctx.Done(): + copy = false + t.Kill(errors.New("hangup by client")) + } + }() + for line := range t.Lines { + select { + case <-ctx.Done(): + // the consumer has cancelled + return nil + default: + // fallthrough + } + + event, err := newEventFromJSONString(line.Text) + if err != nil { + return err + } + switch event.Type { + case Image, Volume, Pod, System, Container, Network: + // no-op + default: + return errors.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath) + } + if copy && applyFilters(event, filterMap) { + options.EventChannel <- event + } + } + funcDone <- true + return nil +} + +// String returns a string representation of the logger +func (e EventLogFile) String() string { + return LogFile.String() +} + +// Rotates the log file if the log file size and content exceeds limit +func rotateLog(logfile string, content string, limit uint64) (bool, error) { + if limit == 0 { + return false, nil + } + file, err := os.Stat(logfile) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // The logfile does not exist yet. 
+ return false, nil + } + return false, err + } + var filesize = uint64(file.Size()) + var contentsize = uint64(len([]rune(content))) + if filesize+contentsize < limit { + return false, nil + } + + if err := truncate(logfile); err != nil { + return false, err + } + return true, nil +} + +// Truncates the log file and saves 50% of content to new log file +func truncate(filePath string) error { + orig, err := os.Open(filePath) + if err != nil { + return err + } + defer orig.Close() + + origFinfo, err := orig.Stat() + if err != nil { + return err + } + + size := origFinfo.Size() + threshold := size / 2 + + tmp, err := ioutil.TempFile(path.Dir(filePath), "") + if err != nil { + // Retry in /tmp in case creating a tmp file in the same + // directory has failed. + tmp, err = ioutil.TempFile("", "") + if err != nil { + return err + } + } + defer tmp.Close() + + // Jump directly to the threshold, drop the first line and copy the remainder + if _, err := orig.Seek(threshold, 0); err != nil { + return err + } + reader := bufio.NewReader(orig) + if _, err := reader.ReadString('\n'); err != nil { + if !errors.Is(err, io.EOF) { + return err + } + } + if _, err := reader.WriteTo(tmp); err != nil { + return fmt.Errorf("writing truncated contents: %w", err) + } + + if err := renameLog(tmp.Name(), filePath); err != nil { + return fmt.Errorf("writing back %s to %s: %w", tmp.Name(), filePath, err) + } + + return nil +} + +// Renames from, to +func renameLog(from, to string) error { + err := os.Rename(from, to) + if err == nil { + return nil + } + + if !errors.Is(err, unix.EXDEV) { + return err + } + + // Files are not on the same partition, so we need to copy them the + // hard way. + fFrom, err := os.Open(from) + if err != nil { + return err + } + defer fFrom.Close() + + fTo, err := os.Create(to) + if err != nil { + return err + } + defer fTo.Close() + + if _, err := io.Copy(fTo, fFrom); err != nil { + return fmt.Errorf("writing back from temporary file: %w", err) + } + + if err := os.Remove(from); err != nil { + return fmt.Errorf("removing temporary file: %w", err) + } + + return nil +} diff --git a/vendor/github.com/containers/podman/v4/libpod/events/memory.go b/vendor/github.com/containers/podman/v4/libpod/events/memory.go new file mode 100644 index 00000000000..b3e03d86bca --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/events/memory.go @@ -0,0 +1,49 @@ +package events + +import ( + "context" +) + +// EventMemory is the structure for event writing to a channel. It contains the eventer +// options and the event itself. Methods for reading and writing are also defined from it. 
+type EventMemory struct { + options EventerOptions + elements chan *Event +} + +// Write event to memory queue +func (e EventMemory) Write(event Event) (err error) { + e.elements <- &event + return +} + +// Read event(s) from memory queue +func (e EventMemory) Read(ctx context.Context, options ReadOptions) (err error) { + select { + case <-ctx.Done(): + return + default: + } + + select { + case event := <-e.elements: + options.EventChannel <- event + default: + } + return nil +} + +// String returns eventer type +func (e EventMemory) String() string { + return e.options.EventerType +} + +// NewMemoryEventer returns configured MemoryEventer +func NewMemoryEventer() Eventer { + return EventMemory{ + options: EventerOptions{ + EventerType: Memory.String(), + }, + elements: make(chan *Event, 100), + } +} diff --git a/vendor/github.com/containers/podman/v4/libpod/events/nullout.go b/vendor/github.com/containers/podman/v4/libpod/events/nullout.go new file mode 100644 index 00000000000..3eca9e8dba3 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/events/nullout.go @@ -0,0 +1,30 @@ +package events + +import ( + "context" +) + +// EventToNull is an eventer type that only performs write operations +// and only writes to /dev/null. It is meant for unittests only +type EventToNull struct{} + +// Write eats the event and always returns nil +func (e EventToNull) Write(ee Event) error { + return nil +} + +// Read does nothing. Do not use it. +func (e EventToNull) Read(ctx context.Context, options ReadOptions) error { + return nil +} + +// NewNullEventer returns a new null eventer. You should only do this for +// the purposes on internal libpod testing. +func NewNullEventer() Eventer { + return EventToNull{} +} + +// String returns a string representation of the logger +func (e EventToNull) String() string { + return "none" +} diff --git a/vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go b/vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go new file mode 100644 index 00000000000..fbe03d97b30 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go @@ -0,0 +1,129 @@ +package handlers + +import ( + "encoding/json" + "reflect" + "syscall" + "time" + + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/util" + "github.com/gorilla/schema" + "github.com/sirupsen/logrus" +) + +// NewAPIDecoder returns a configured schema.Decoder +func NewAPIDecoder() *schema.Decoder { + _ = ParseDateTime + + d := schema.NewDecoder() + d.IgnoreUnknownKeys(true) + d.RegisterConverter(map[string][]string{}, convertURLValuesString) + d.RegisterConverter(time.Time{}, convertTimeString) + d.RegisterConverter(define.ContainerStatus(0), convertContainerStatusString) + d.RegisterConverter(map[string]string{}, convertStringMap) + + var Signal syscall.Signal + d.RegisterConverter(Signal, convertSignal) + return d +} + +// On client: +// v := map[string][]string{ +// "dangling": {"true"}, +// } +// +// payload, err := jsoniter.MarshalToString(v) +// if err != nil { +// panic(err) +// } +// payload = url.QueryEscape(payload) +func convertURLValuesString(query string) reflect.Value { + f := map[string][]string{} + + err := json.Unmarshal([]byte(query), &f) + if err != nil { + logrus.Infof("convertURLValuesString: Failed to Unmarshal %s: %s", query, err.Error()) + } + + return reflect.ValueOf(f) +} + +func convertStringMap(query string) reflect.Value { + res := make(map[string]string) + err := json.Unmarshal([]byte(query), &res) 
+	if err != nil {
+		logrus.Infof("convertStringMap: Failed to Unmarshal %s: %s", query, err.Error())
+	}
+	return reflect.ValueOf(res)
+}
+
+func convertContainerStatusString(query string) reflect.Value {
+	result, err := define.StringToContainerStatus(query)
+	if err != nil {
+		logrus.Infof("convertContainerStatusString: Failed to parse %s: %s", query, err.Error())
+
+		// We return nil here instead of result because reflect.ValueOf().IsValid() will be true
+		// in github.com/gorilla/schema's decoder, which means there's no parsing error
+		return reflect.ValueOf(nil)
+	}
+
+	return reflect.ValueOf(result)
+}
+
+// isZero() can be used to determine if parsing failed.
+func convertTimeString(query string) reflect.Value {
+	var (
+		err error
+		t   time.Time
+	)
+
+	for _, f := range []string{
+		time.UnixDate,
+		time.ANSIC,
+		time.RFC1123,
+		time.RFC1123Z,
+		time.RFC3339,
+		time.RFC3339Nano,
+		time.RFC822,
+		time.RFC822Z,
+		time.RFC850,
+		time.RubyDate,
+		time.Stamp,
+		time.StampMicro,
+		time.StampMilli,
+		time.StampNano,
+	} {
+		t, err = time.Parse(f, query)
+		if err == nil {
+			return reflect.ValueOf(t)
+		}
+
+		if _, isParseError := err.(*time.ParseError); isParseError {
+			// Try next format
+			continue
+		} else {
+			break
+		}
+	}
+
+	// We've exhausted all formats, or something bad happened
+	if err != nil {
+		logrus.Infof("convertTimeString: Failed to parse %s: %s", query, err.Error())
+	}
+	return reflect.ValueOf(time.Time{})
+}
+
+// ParseDateTime is a helper function to aid in parsing different Time/Date formats
+// isZero() can be used to determine if parsing failed.
+func ParseDateTime(query string) time.Time {
+	return convertTimeString(query).Interface().(time.Time)
+}
+
+func convertSignal(query string) reflect.Value {
+	signal, err := util.ParseSignal(query)
+	if err != nil {
+		logrus.Infof("convertSignal: Failed to parse %s: %s", query, err.Error())
+	}
+	return reflect.ValueOf(signal)
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go b/vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go
new file mode 100644
index 00000000000..9eb712c304d
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go
@@ -0,0 +1,254 @@
+package handlers
+
+import (
+	"context"
+	"time"
+
+	"github.com/containers/common/libimage"
+	"github.com/containers/podman/v4/pkg/domain/entities"
+	docker "github.com/docker/docker/api/types"
+	dockerContainer "github.com/docker/docker/api/types/container"
+	dockerNetwork "github.com/docker/docker/api/types/network"
+	"github.com/docker/go-connections/nat"
+	"github.com/pkg/errors"
+)
+
+type AuthConfig struct {
+	docker.AuthConfig
+}
+
+type ImageInspect struct {
+	docker.ImageInspect
+}
+
+type ContainerConfig struct {
+	dockerContainer.Config
+}
+
+type LibpodImagesPullReport struct {
+	entities.ImagePullReport
+}
+
+// LibpodImagesRemoveReport is the return type for image removal via the rest
+// api.
+type LibpodImagesRemoveReport struct {
+	entities.ImageRemoveReport
+	// Image removal requires us to return data and an error.
+	Errors []string
+}
+
+type ContainersPruneReport struct {
+	docker.ContainersPruneReport
+}
+
+type ContainersPruneReportLibpod struct {
+	ID             string `json:"Id"`
+	SpaceReclaimed int64  `json:"Size"`
+	// Error which occurred during prune operation (if any).
+	// This field is optional and may be omitted if no error occurred.
+ // + // Extensions: + // x-omitempty: true + // x-nullable: true + PruneError string `json:"Err,omitempty"` +} + +type LibpodContainersRmReport struct { + ID string `json:"Id"` + // Error which occurred during Rm operation (if any). + // This field is optional and may be omitted if no error occurred. + // + // Extensions: + // x-omitempty: true + // x-nullable: true + RmError string `json:"Err,omitempty"` +} + +type Info struct { + docker.Info + BuildahVersion string + CPURealtimePeriod bool + CPURealtimeRuntime bool + CgroupVersion string + Rootless bool + SwapFree int64 + SwapTotal int64 + Uptime string +} + +type Container struct { + docker.Container + docker.ContainerCreateConfig +} + +type DiskUsage struct { + docker.DiskUsage +} + +type VolumesPruneReport struct { + docker.VolumesPruneReport +} + +type ImagesPruneReport struct { + docker.ImagesPruneReport +} + +type BuildCachePruneReport struct { + docker.BuildCachePruneReport +} + +type NetworkPruneReport struct { + docker.NetworksPruneReport +} + +type ConfigCreateResponse struct { + docker.ConfigCreateResponse +} + +type PushResult struct { + docker.PushResult +} + +type BuildResult struct { + docker.BuildResult +} + +type ContainerWaitOKBody struct { + StatusCode int + Error *struct { + Message string + } +} + +// CreateContainerConfig used when compatible endpoint creates a container +// swagger:model +type CreateContainerConfig struct { + Name string // container name + dockerContainer.Config // desired container configuration + HostConfig dockerContainer.HostConfig // host dependent configuration for container + NetworkingConfig dockerNetwork.NetworkingConfig // network configuration for container + UnsetEnv []string // unset specified default environment variables + UnsetEnvAll bool // unset all default environment variables +} + +type ContainerTopOKBody struct { + dockerContainer.ContainerTopOKBody +} + +type PodTopOKBody struct { + dockerContainer.ContainerTopOKBody +} + +// HistoryResponse provides details on image layers +type HistoryResponse struct { + ID string `json:"Id"` + Created int64 + CreatedBy string + Tags []string + Size int64 + Comment string +} + +type ExecCreateConfig struct { + docker.ExecConfig +} + +type ExecStartConfig struct { + Detach bool `json:"Detach"` + Tty bool `json:"Tty"` + Height uint16 `json:"h"` + Width uint16 `json:"w"` +} + +func ImageDataToImageInspect(ctx context.Context, l *libimage.Image) (*ImageInspect, error) { + options := &libimage.InspectOptions{WithParent: true, WithSize: true} + info, err := l.Inspect(context.Background(), options) + if err != nil { + return nil, err + } + ports, err := portsToPortSet(info.Config.ExposedPorts) + if err != nil { + return nil, err + } + + // TODO: many fields in Config still need wiring + config := dockerContainer.Config{ + User: info.User, + ExposedPorts: ports, + Env: info.Config.Env, + Cmd: info.Config.Cmd, + Volumes: info.Config.Volumes, + WorkingDir: info.Config.WorkingDir, + Entrypoint: info.Config.Entrypoint, + Labels: info.Labels, + StopSignal: info.Config.StopSignal, + } + + rootfs := docker.RootFS{} + if info.RootFS != nil { + rootfs.Type = info.RootFS.Type + rootfs.Layers = make([]string, 0, len(info.RootFS.Layers)) + for _, layer := range info.RootFS.Layers { + rootfs.Layers = append(rootfs.Layers, string(layer)) + } + } + + graphDriver := docker.GraphDriverData{ + Name: info.GraphDriver.Name, + Data: info.GraphDriver.Data, + } + // Add in basic ContainerConfig to satisfy docker-compose + cc := new(dockerContainer.Config) + 
cc.Hostname = info.ID[0:11] // short ID is the hostname
+	cc.Volumes = info.Config.Volumes
+
+	dockerImageInspect := docker.ImageInspect{
+		Architecture:    info.Architecture,
+		Author:          info.Author,
+		Comment:         info.Comment,
+		Config:          &config,
+		ContainerConfig: cc,
+		Created:         l.Created().Format(time.RFC3339Nano),
+		DockerVersion:   info.Version,
+		GraphDriver:     graphDriver,
+		ID:              "sha256:" + l.ID(),
+		Metadata:        docker.ImageMetadata{},
+		Os:              info.Os,
+		OsVersion:       info.Version,
+		Parent:          info.Parent,
+		RepoDigests:     info.RepoDigests,
+		RepoTags:        info.RepoTags,
+		RootFS:          rootfs,
+		Size:            info.Size,
+		Variant:         "",
+		VirtualSize:     info.VirtualSize,
+	}
+	return &ImageInspect{dockerImageInspect}, nil
+}
+
+// portsToPortSet converts libpod's exposed ports to docker's structs
+func portsToPortSet(input map[string]struct{}) (nat.PortSet, error) {
+	ports := make(nat.PortSet)
+	for k := range input {
+		proto, port := nat.SplitProtoPort(k)
+		switch proto {
+		// See the OCI image spec for details:
+		// https://github.com/opencontainers/image-spec/blob/e562b04403929d582d449ae5386ff79dd7961a11/config.md#properties
+		case "tcp", "":
+			p, err := nat.NewPort("tcp", port)
+			if err != nil {
+				return nil, errors.Wrapf(err, "unable to create tcp port from %s", k)
+			}
+			ports[p] = struct{}{}
+		case "udp":
+			p, err := nat.NewPort("udp", port)
+			if err != nil {
+				return nil, errors.Wrapf(err, "unable to create udp port from %s", k)
+			}
+			ports[p] = struct{}{}
+		default:
+			return nil, errors.Errorf("invalid port proto %q in %q", proto, k)
+		}
+	}
+	return ports, nil
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/api/handlers/types/types.go b/vendor/github.com/containers/podman/v4/pkg/api/handlers/types/types.go
new file mode 100644
index 00000000000..71e1d50248f
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/api/handlers/types/types.go
@@ -0,0 +1,21 @@
+package types
+
+import "github.com/containers/podman/v4/pkg/domain/entities"
+
+// LibpodImagesRemoveReport is the return type for image removal via the rest
+// api.
+type LibpodImagesRemoveReport struct {
+	entities.ImageRemoveReport
+	// Image removal requires us to return data and an error.
+	Errors []string
+}
+
+// HistoryResponse provides details on image layers
+type HistoryResponse struct {
+	ID        string `json:"Id"`
+	Created   int64
+	CreatedBy string
+	Tags      []string
+	Size      int64
+	Comment   string
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/auth/auth.go b/vendor/github.com/containers/podman/v4/pkg/auth/auth.go
new file mode 100644
index 00000000000..4192250079f
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/auth/auth.go
@@ -0,0 +1,346 @@
+package auth
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+
+	imageAuth "github.com/containers/image/v5/pkg/docker/config"
+	"github.com/containers/image/v5/types"
+	dockerAPITypes "github.com/docker/docker/api/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// xRegistryAuthHeader is the key to the encoded registry authentication configuration in an http-request header.
+// This header supports one registry per header occurrence. To support N registries provide N headers, one per registry.
+// As of Docker API 1.40 and Libpod API 1.0.0, this header is supported by all endpoints.
+const xRegistryAuthHeader = "X-Registry-Auth"
+
+// xRegistryConfigHeader is the key to the encoded registry authentication configuration in an http-request header.
+// This header supports N registries in one header via a Base64 encoded, JSON map. +// As of Docker API 1.40 and Libpod API 2.0.0, this header is supported by build endpoints. +const xRegistryConfigHeader = "X-Registry-Config" + +// GetCredentials queries the http.Request for X-Registry-.* headers and extracts +// the necessary authentication information for libpod operations, possibly +// creating a config file. If that is the case, the caller must call RemoveAuthFile. +func GetCredentials(r *http.Request) (*types.DockerAuthConfig, string, error) { + nonemptyHeaderValue := func(key string) ([]string, bool) { + hdr := r.Header.Values(key) + return hdr, len(hdr) > 0 + } + var override *types.DockerAuthConfig + var fileContents map[string]types.DockerAuthConfig + var headerName string + var err error + if hdr, ok := nonemptyHeaderValue(xRegistryConfigHeader); ok { + headerName = xRegistryConfigHeader + override, fileContents, err = getConfigCredentials(r, hdr) + } else if hdr, ok := nonemptyHeaderValue(xRegistryAuthHeader); ok { + headerName = xRegistryAuthHeader + override, fileContents, err = getAuthCredentials(hdr) + } else { + return nil, "", nil + } + if err != nil { + return nil, "", errors.Wrapf(err, "failed to parse %q header for %s", headerName, r.URL.String()) + } + + var authFile string + if fileContents == nil { + authFile = "" + } else { + authFile, err = authConfigsToAuthFile(fileContents) + if err != nil { + return nil, "", errors.Wrapf(err, "failed to parse %q header for %s", headerName, r.URL.String()) + } + } + return override, authFile, nil +} + +// getConfigCredentials extracts one or more docker.AuthConfig from a request and its +// xRegistryConfigHeader value. An empty key will be used as default while a named registry will be +// returned as types.DockerAuthConfig +func getConfigCredentials(r *http.Request, headers []string) (*types.DockerAuthConfig, map[string]types.DockerAuthConfig, error) { + var auth *types.DockerAuthConfig + configs := make(map[string]types.DockerAuthConfig) + + for _, h := range headers { + param, err := base64.URLEncoding.DecodeString(h) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to decode %q", xRegistryConfigHeader) + } + + ac := make(map[string]dockerAPITypes.AuthConfig) + err = json.Unmarshal(param, &ac) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to unmarshal %q", xRegistryConfigHeader) + } + + for k, v := range ac { + configs[k] = dockerAuthToImageAuth(v) + } + } + + // Empty key implies no registry given in API + if c, found := configs[""]; found { + auth = &c + } + + // Override any default given above if specialized credentials provided + if registries, found := r.URL.Query()["registry"]; found { + for _, r := range registries { + for k, v := range configs { + if strings.Contains(k, r) { + v := v + auth = &v + break + } + } + if auth != nil { + break + } + } + + if auth == nil { + logrus.Debugf("%q header found in request, but \"registry=%v\" query parameter not provided", + xRegistryConfigHeader, registries) + } else { + logrus.Debugf("%q header found in request for username %q", xRegistryConfigHeader, auth.Username) + } + } + + return auth, configs, nil +} + +// getAuthCredentials extracts one or more DockerAuthConfigs from an xRegistryAuthHeader +// value. The header could specify a single-auth config in which case the +// first return value is set. In case of a multi-auth header, the contents are +// returned in the second return value. 
+func getAuthCredentials(headers []string) (*types.DockerAuthConfig, map[string]types.DockerAuthConfig, error) { + authHeader := headers[0] + + // First look for a multi-auth header (i.e., a map). + authConfigs, err := parseMultiAuthHeader(authHeader) + if err == nil { + return nil, authConfigs, nil + } + + // Fallback to looking for a single-auth header (i.e., one config). + authConfig, err := parseSingleAuthHeader(authHeader) + if err != nil { + return nil, nil, err + } + return &authConfig, nil, nil +} + +// MakeXRegistryConfigHeader returns a map with the "X-Registry-Config" header set, which can +// conveniently be used in the http stack. +func MakeXRegistryConfigHeader(sys *types.SystemContext, username, password string) (http.Header, error) { + if sys == nil { + sys = &types.SystemContext{} + } + authConfigs, err := imageAuth.GetAllCredentials(sys) + if err != nil { + return nil, err + } + + if username != "" { + authConfigs[""] = types.DockerAuthConfig{ + Username: username, + Password: password, + } + } + + if len(authConfigs) == 0 { + return nil, nil + } + content, err := encodeMultiAuthConfigs(authConfigs) + if err != nil { + return nil, err + } + return http.Header{xRegistryConfigHeader: []string{content}}, nil +} + +// MakeXRegistryAuthHeader returns a map with the "X-Registry-Auth" header set, which can +// conveniently be used in the http stack. +func MakeXRegistryAuthHeader(sys *types.SystemContext, username, password string) (http.Header, error) { + if username != "" { + content, err := encodeSingleAuthConfig(types.DockerAuthConfig{Username: username, Password: password}) + if err != nil { + return nil, err + } + return http.Header{xRegistryAuthHeader: []string{content}}, nil + } + + if sys == nil { + sys = &types.SystemContext{} + } + authConfigs, err := imageAuth.GetAllCredentials(sys) + if err != nil { + return nil, err + } + content, err := encodeMultiAuthConfigs(authConfigs) + if err != nil { + return nil, err + } + return http.Header{xRegistryAuthHeader: []string{content}}, nil +} + +// RemoveAuthfile is a convenience function that is meant to be called in a +// deferred statement. If non-empty, it removes the specified authfile and log +// errors. It's meant to reduce boilerplate code at call sites of +// `GetCredentials`. +func RemoveAuthfile(authfile string) { + if authfile == "" { + return + } + if err := os.Remove(authfile); err != nil { + logrus.Errorf("Removing temporary auth file %q: %v", authfile, err) + } +} + +// encodeSingleAuthConfig serializes the auth configuration as a base64 encoded JSON payload. +func encodeSingleAuthConfig(authConfig types.DockerAuthConfig) (string, error) { + conf := imageAuthToDockerAuth(authConfig) + buf, err := json.Marshal(conf) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +// encodeMultiAuthConfigs serializes the auth configurations as a base64 encoded JSON payload. +func encodeMultiAuthConfigs(authConfigs map[string]types.DockerAuthConfig) (string, error) { + confs := make(map[string]dockerAPITypes.AuthConfig) + for registry, authConf := range authConfigs { + confs[registry] = imageAuthToDockerAuth(authConf) + } + buf, err := json.Marshal(confs) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +// authConfigsToAuthFile stores the specified auth configs in a temporary files +// and returns its path. The file can later be used an auth file for contacting +// one or more container registries. 
The file is created in the
+// system's default TMPDIR.
+func authConfigsToAuthFile(authConfigs map[string]types.DockerAuthConfig) (string, error) {
+	// Initialize an empty temporary JSON file.
+	tmpFile, err := ioutil.TempFile("", "auth.json.")
+	if err != nil {
+		return "", err
+	}
+	if _, err := tmpFile.Write([]byte{'{', '}'}); err != nil {
+		return "", errors.Wrap(err, "error initializing temporary auth file")
+	}
+	if err := tmpFile.Close(); err != nil {
+		return "", errors.Wrap(err, "error closing temporary auth file")
+	}
+	authFilePath := tmpFile.Name()
+
+	// TODO: It would be nice if c/image could dump the map at once.
+	//
+	// Now use the c/image packages to store the credentials. It's battle
+	// tested, and we make sure to use the same code as the image backend.
+	sys := types.SystemContext{AuthFilePath: authFilePath}
+	for authFileKey, config := range authConfigs {
+		key := normalizeAuthFileKey(authFileKey)
+
+		// Note that we do not validate the credentials here. We assume
+		// that all credentials are valid. They'll be used on demand
+		// later.
+		if err := imageAuth.SetAuthentication(&sys, key, config.Username, config.Password); err != nil {
+			return "", errors.Wrapf(err, "error storing credentials in temporary auth file (key: %q / %q, user: %q)", authFileKey, key, config.Username)
+		}
+	}
+
+	return authFilePath, nil
+}
+
+// normalizeAuthFileKey takes an auth file key and converts it into a new-style credential key
+// in the canonical format, as interpreted by c/image/pkg/docker/config.
+func normalizeAuthFileKey(authFileKey string) string {
+	stripped := strings.TrimPrefix(authFileKey, "http://")
+	stripped = strings.TrimPrefix(stripped, "https://")
+
+	if stripped != authFileKey { // URLs are interpreted to mean complete registries
+		stripped = strings.SplitN(stripped, "/", 2)[0]
+	}
+
+	// Only non-namespaced registry names (or URLs) need to be normalized; repo namespaces
+	// always use the simple format.
+	switch stripped {
+	case "registry-1.docker.io", "index.docker.io":
+		return "docker.io"
+	default:
+		return stripped
+	}
+}
+
+// dockerAuthToImageAuth converts a docker auth config to one we're using
+// internally from c/image. Note that the Docker types look slightly
+// different, so we need to convert to be extra sure we're not running into
+// undesired side-effects when unmarshalling directly to our types.
+func dockerAuthToImageAuth(authConfig dockerAPITypes.AuthConfig) types.DockerAuthConfig {
+	return types.DockerAuthConfig{
+		Username:      authConfig.Username,
+		Password:      authConfig.Password,
+		IdentityToken: authConfig.IdentityToken,
+	}
+}
+
+// reverse conversion of `dockerAuthToImageAuth`.
+func imageAuthToDockerAuth(authConfig types.DockerAuthConfig) dockerAPITypes.AuthConfig {
+	return dockerAPITypes.AuthConfig{
+		Username:      authConfig.Username,
+		Password:      authConfig.Password,
+		IdentityToken: authConfig.IdentityToken,
+	}
+}
+
+// parseSingleAuthHeader extracts a DockerAuthConfig from an xRegistryAuthHeader value.
+// The header content is a single DockerAuthConfig.
+func parseSingleAuthHeader(authHeader string) (types.DockerAuthConfig, error) {
+	// Accept "null" and handle it as an empty value for compatibility reasons with Docker.
+	// Some java docker clients pass this value, e.g. the one used in Eclipse.
+	if len(authHeader) == 0 || authHeader == "null" {
+		return types.DockerAuthConfig{}, nil
+	}
+
+	authConfig := dockerAPITypes.AuthConfig{}
+	authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authHeader))
+	if err := json.NewDecoder(authJSON).Decode(&authConfig); err != nil {
+		return types.DockerAuthConfig{}, err
+	}
+	return dockerAuthToImageAuth(authConfig), nil
+}
+
+// parseMultiAuthHeader extracts a DockerAuthConfig from an xRegistryAuthHeader value.
+// The header content is a map[string]DockerAuthConfigs.
+func parseMultiAuthHeader(authHeader string) (map[string]types.DockerAuthConfig, error) {
+	// Accept "null" and handle it as an empty value for compatibility reasons with Docker.
+	// Some java docker clients pass this value, e.g. the one used in Eclipse.
+	if len(authHeader) == 0 || authHeader == "null" {
+		return nil, nil
+	}
+
+	dockerAuthConfigs := make(map[string]dockerAPITypes.AuthConfig)
+	authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authHeader))
+	if err := json.NewDecoder(authJSON).Decode(&dockerAuthConfigs); err != nil {
+		return nil, err
+	}
+
+	// Now convert to the internal types.
+	authConfigs := make(map[string]types.DockerAuthConfig)
+	for server := range dockerAuthConfigs {
+		authConfigs[server] = dockerAuthToImageAuth(dockerAuthConfigs[server])
+	}
+	return authConfigs, nil
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/README.md b/vendor/github.com/containers/podman/v4/pkg/bindings/README.md
new file mode 100644
index 00000000000..ebc8a13d1b4
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/bindings/README.md
@@ -0,0 +1,241 @@
+# Podman Golang bindings
+The Podman Go bindings are a set of functions to allow developers to execute Podman operations from within their Go-based application. The Go bindings
+connect to a Podman service which can run locally or on a remote machine. You can perform many operations including pulling and listing images, starting,
+stopping or inspecting containers. Currently, the Podman repository has bindings available for operations on images, containers, pods,
+networks and manifests among others.
+
+## Quick Start
+The bindings require that the Podman system service is running for the specified user. This can be done with systemd using the `systemctl` command or manually
+by calling the service directly.
+
+### Starting the service with systemd
+The command to start the Podman service differs slightly depending on the user that is running the service. For a rootful service,
+start the service like this:
+```
+# systemctl start podman.socket
+```
+For a non-privileged, aka rootless, user, start the service like this:
+
+```
+$ systemctl start --user podman.socket
+```
+
+### Starting the service manually
+It can be handy to run the system service manually. Doing so allows you to enable debug messaging.
+```
+$ podman --log-level=debug system service -t0
+```
+If you do not provide a specific path for the socket, a default is provided. The location of that socket for
+rootful connections is `/run/podman/podman.sock` and for rootless it is `/run/user/$UID/podman/podman.sock`. For more
+information about the Podman system service, see `man podman-system-service`.
+
+### Creating a connection
+Ensure the [required dependencies](https://podman.io/getting-started/installation#build-and-run-dependencies) are installed,
+as they will be required to compile a Go program making use of the bindings.
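+
+Because the socket path differs between rootful and rootless users, it can be
+handy to compute the connection URI instead of hard coding it. The following is
+a minimal sketch, not part of the bindings API: the `socketURI` helper is our
+own name, and it assumes `XDG_RUNTIME_DIR` is set for rootless sessions.
+
+```
+import (
+	"fmt"
+	"os"
+)
+
+// socketURI returns the rootful system socket for UID 0 and a
+// best-effort rootless user socket otherwise.
+func socketURI() string {
+	uid := os.Getuid()
+	if uid == 0 {
+		return "unix://run/podman/podman.sock"
+	}
+	if dir, ok := os.LookupEnv("XDG_RUNTIME_DIR"); ok {
+		return fmt.Sprintf("unix://%s/podman/podman.sock", dir)
+	}
+	return fmt.Sprintf("unix://run/user/%d/podman/podman.sock", uid)
+}
+```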
+
+
+The first step for using the bindings is to create a connection to the socket. As mentioned earlier, the destination
+of the socket depends on the user who owns it. In this case, a rootful connection is made.
+
+```
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/containers/podman/v4/pkg/bindings"
+)
+
+func main() {
+	conn, err := bindings.NewConnection(context.Background(), "unix://run/podman/podman.sock")
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
+}
+```
+The `conn` variable returned from the `bindings.NewConnection` function can then be used in subsequent function calls
+to interact with containers.
+
+### Examples
+The following examples build upon the connection example from above. They are all rootful connections as well.
+
+Note: Optional arguments to the bindings methods are set using With*() methods on *Option structures.
+Composite types are not duplicated; rather, the address is used. As such, you should not change an underlying
+field between initializing the *Option structure and calling the bindings method.
+
+#### Inspect a container
+The following example obtains the inspect information for a container named `foobar` and then prints
+the container's ID. Note the use of optional inspect options for size.
+```
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/containers/podman/v4/pkg/bindings"
+	"github.com/containers/podman/v4/pkg/bindings/containers"
+)
+
+func main() {
+	conn, err := bindings.NewConnection(context.Background(), "unix://run/podman/podman.sock")
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	inspectData, err := containers.Inspect(conn, "foobar", new(containers.InspectOptions).WithSize(true))
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	// Print the container ID
+	fmt.Println(inspectData.ID)
+}
+```
+
+#### Pull an image
+The following example pulls the image `quay.io/libpod/alpine_nginx` to the local image store.
+```
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/containers/podman/v4/pkg/bindings"
+	"github.com/containers/podman/v4/pkg/bindings/images"
+)
+
+func main() {
+	conn, err := bindings.NewConnection(context.Background(), "unix://run/podman/podman.sock")
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	_, err = images.Pull(conn, "quay.io/libpod/alpine_nginx", nil)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
+
+```
+
+#### Pull an image, create a container, and start the container
+The following example pulls the `quay.io/libpod/alpine_nginx` image and then creates a container named `foobar`
+from it. Finally, it starts the container.
+``` +import ( + "context" + "fmt" + "os" + + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/bindings/containers" + "github.com/containers/podman/v4/pkg/bindings/images" + "github.com/containers/podman/v4/pkg/specgen" +) + +func main() { + conn, err := bindings.NewConnection(context.Background(), "unix://run/podman/podman.sock") + if err != nil { + fmt.Println(err) + os.Exit(1) + } + _, err = images.Pull(conn, "quay.io/libpod/alpine_nginx", nil) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + s := specgen.NewSpecGenerator("quay.io/libpod/alpine_nginx", false) + s.Name = "foobar" + createResponse, err := containers.CreateWithSpec(conn, s, nil) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + fmt.Println("Container created.") + if err := containers.Start(conn, createResponse.ID, nil); err != nil { + fmt.Println(err) + os.Exit(1) + } + fmt.Println("Container started.") +} +``` + +## Debugging tips + +To debug in a development setup, you can start the Podman system service +in debug mode like: + +```bash +$ podman --log-level=debug system service -t 0 +``` + +The `--log-level=debug` echoes all the logged requests and is useful to +trace the execution path at a finer granularity. A snippet of a sample run looks like: + +```bash +INFO[0000] podman filtering at log level debug +DEBU[0000] Called service.PersistentPreRunE(podman --log-level=debug system service -t0) +DEBU[0000] Ignoring libpod.conf EventsLogger setting "/home/lsm5/.config/containers/containers.conf". Use "journald" if you want to change this setting and remove libpod.conf files. +DEBU[0000] Reading configuration file "/usr/share/containers/containers.conf" +DEBU[0000] Merged system config "/usr/share/containers/containers.conf": {Editors note: the remainder of this line was removed due to Jekyll formatting errors.} +DEBU[0000] Using conmon: "/usr/bin/conmon" +DEBU[0000] Initializing boltdb state at /home/lsm5/.local/share/containers/storage/libpod/bolt_state.db +DEBU[0000] Overriding run root "/run/user/1000/containers" with "/run/user/1000" from database +DEBU[0000] Using graph driver overlay +DEBU[0000] Using graph root /home/lsm5/.local/share/containers/storage +DEBU[0000] Using run root /run/user/1000 +DEBU[0000] Using static dir /home/lsm5/.local/share/containers/storage/libpod +DEBU[0000] Using tmp dir /run/user/1000/libpod/tmp +DEBU[0000] Using volume path /home/lsm5/.local/share/containers/storage/volumes +DEBU[0000] Set libpod namespace to "" +DEBU[0000] Not configuring container store +DEBU[0000] Initializing event backend file +DEBU[0000] using runtime "/usr/bin/runc" +DEBU[0000] using runtime "/usr/bin/crun" +WARN[0000] Error initializing configured OCI runtime kata: no valid executable found for OCI runtime kata: invalid argument +DEBU[0000] using runtime "/usr/bin/crun" +INFO[0000] Setting parallel job count to 25 +INFO[0000] podman filtering at log level debug +DEBU[0000] Called service.PersistentPreRunE(podman --log-level=debug system service -t0) +DEBU[0000] Ignoring libpod.conf EventsLogger setting "/home/lsm5/.config/containers/containers.conf". Use "journald" if you want to change this setting and remove libpod.conf files. +DEBU[0000] Reading configuration file "/usr/share/containers/containers.conf" +``` + +If the Podman system service has been started via systemd socket activation, +you can view the logs using journalctl. 
The logs after a sample run look like:
+
+```bash
+$ journalctl --user --no-pager -u podman.socket
+-- Reboot --
+Jul 22 13:50:40 nagato.nanadai.me systemd[1048]: Listening on Podman API Socket.
+$
+```
+
+```bash
+$ journalctl --user --no-pager -u podman.service
+Jul 22 13:50:53 nagato.nanadai.me systemd[1048]: Starting Podman API Service...
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: time="2020-07-22T13:50:54-04:00" level=error msg="Error refreshing volume 38480630a8bdaa3e1a0ebd34c94038591b0d7ad994b37be5b4f2072bb6ef0879: error acquiring lock 0 for volume 38480630a8bdaa3e1a0ebd34c94038591b0d7ad994b37be5b4f2072bb6ef0879: file exists"
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: time="2020-07-22T13:50:54-04:00" level=error msg="Error refreshing volume 47d410af4d762a0cc456a89e58f759937146fa3be32b5e95a698a1d4069f4024: error acquiring lock 0 for volume 47d410af4d762a0cc456a89e58f759937146fa3be32b5e95a698a1d4069f4024: file exists"
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: time="2020-07-22T13:50:54-04:00" level=error msg="Error refreshing volume 86e73f082e344dad38c8792fb86b2017c4f133f2a8db87f239d1d28a78cf0868: error acquiring lock 0 for volume 86e73f082e344dad38c8792fb86b2017c4f133f2a8db87f239d1d28a78cf0868: file exists"
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: time="2020-07-22T13:50:54-04:00" level=error msg="Error refreshing volume 9a16ea764be490a5563e384d9074ab0495e4d9119be380c664037d6cf1215631: error acquiring lock 0 for volume 9a16ea764be490a5563e384d9074ab0495e4d9119be380c664037d6cf1215631: file exists"
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: time="2020-07-22T13:50:54-04:00" level=error msg="Error refreshing volume bfd6b2a97217f8655add13e0ad3f6b8e1c79bc1519b7a1e15361a107ccf57fc0: error acquiring lock 0 for volume bfd6b2a97217f8655add13e0ad3f6b8e1c79bc1519b7a1e15361a107ccf57fc0: file exists"
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: time="2020-07-22T13:50:54-04:00" level=error msg="Error refreshing volume f9b9f630982452ebcbed24bd229b142fbeecd5d4c85791fca440b21d56fef563: error acquiring lock 0 for volume f9b9f630982452ebcbed24bd229b142fbeecd5d4c85791fca440b21d56fef563: file exists"
+Jul 22 13:50:54 nagato.nanadai.me podman[1527]: Trying to pull registry.fedoraproject.org/fedora:latest...
+Jul 22 13:50:55 nagato.nanadai.me podman[1527]: Getting image source signatures
+Jul 22 13:50:55 nagato.nanadai.me podman[1527]: Copying blob sha256:dd9f43919ba05f05d4f783c31e83e5e776c4f5d29dd72b9ec5056b9576c10053
+Jul 22 13:50:55 nagato.nanadai.me podman[1527]: Copying config sha256:00ff39a8bf19f810a7e641f7eb3ddc47635913a19c4996debd91fafb6b379069
+Jul 22 13:50:55 nagato.nanadai.me podman[1527]: Writing manifest to image destination
+Jul 22 13:50:55 nagato.nanadai.me podman[1527]: Storing signatures
+Jul 22 13:50:55 nagato.nanadai.me systemd[1048]: podman.service: unit configures an IP firewall, but not running as root.
+Jul 22 13:50:55 nagato.nanadai.me systemd[1048]: (This warning is only shown for the first unit using IP firewalling.)
+Jul 22 13:51:15 nagato.nanadai.me systemd[1048]: podman.service: Succeeded.
+Jul 22 13:51:15 nagato.nanadai.me systemd[1048]: Finished Podman API Service.
+Jul 22 13:51:15 nagato.nanadai.me systemd[1048]: podman.service: Consumed 1.339s CPU time.
+$
+```
+
+You can also verify that the information being passed back and forth is correct by using a
+tool like `socat`, which can dump what the socket is seeing.
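+
+For example, you can interpose `socat` between a client and the service and watch the traffic on the
+listening side. A minimal sketch, assuming a rootless service socket (adjust the paths to your setup):
+
+```bash
+# Dump everything flowing over the proxied socket; point your client at /tmp/podman-proxy.sock
+$ socat -v UNIX-LISTEN:/tmp/podman-proxy.sock,fork UNIX-CONNECT:/run/user/$UID/podman/podman.sock
+```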
diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/connection.go b/vendor/github.com/containers/podman/v4/pkg/bindings/connection.go
new file mode 100644
index 00000000000..3739ec404fd
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/bindings/connection.go
@@ -0,0 +1,412 @@
+package bindings
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/blang/semver"
+	"github.com/containers/podman/v4/pkg/terminal"
+	"github.com/containers/podman/v4/version"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/crypto/ssh"
+	"golang.org/x/crypto/ssh/agent"
+)
+
+type APIResponse struct {
+	*http.Response
+	Request *http.Request
+}
+
+type Connection struct {
+	URI    *url.URL
+	Client *http.Client
+}
+
+type valueKey string
+
+const (
+	clientKey  = valueKey("Client")
+	versionKey = valueKey("ServiceVersion")
+)
+
+// GetClient from context built by NewConnection()
+func GetClient(ctx context.Context) (*Connection, error) {
+	if c, ok := ctx.Value(clientKey).(*Connection); ok {
+		return c, nil
+	}
+	return nil, errors.Errorf("%s not set in context", clientKey)
+}
+
+// ServiceVersion from context built by NewConnection()
+func ServiceVersion(ctx context.Context) *semver.Version {
+	if v, ok := ctx.Value(versionKey).(*semver.Version); ok {
+		return v
+	}
+	return new(semver.Version)
+}
+
+// JoinURL elements with '/'
+func JoinURL(elements ...string) string {
+	return "/" + strings.Join(elements, "/")
+}
+
+// NewConnection creates a new service connection without an identity
+func NewConnection(ctx context.Context, uri string) (context.Context, error) {
+	return NewConnectionWithIdentity(ctx, uri, "")
+}
+
+// NewConnectionWithIdentity takes a URI as a string and returns a context with the
+// Connection embedded as a value. This context needs to be passed to each
+// endpoint to work correctly.
+//
+// A valid URI connection should be scheme://
+// For example tcp://localhost:<port>
+// or unix:///run/podman/podman.sock
+// or ssh://<user>@<host>[:port]/run/podman/podman.sock?secure=True
+func NewConnectionWithIdentity(ctx context.Context, uri string, identity string) (context.Context, error) {
+	var (
+		err    error
+		secure bool
+	)
+	if v, found := os.LookupEnv("CONTAINER_HOST"); found && uri == "" {
+		uri = v
+	}
+
+	if v, found := os.LookupEnv("CONTAINER_SSHKEY"); found && len(identity) == 0 {
+		identity = v
+	}
+
+	passPhrase := ""
+	if v, found := os.LookupEnv("CONTAINER_PASSPHRASE"); found {
+		passPhrase = v
+	}
+
+	_url, err := url.Parse(uri)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Value of CONTAINER_HOST is not a valid url: %s", uri)
+	}
+
+	// Now we set up the http Client to use the connection above
+	var connection Connection
+	switch _url.Scheme {
+	case "ssh":
+		secure, err = strconv.ParseBool(_url.Query().Get("secure"))
+		if err != nil {
+			secure = false
+		}
+		connection, err = sshClient(_url, secure, passPhrase, identity)
+	case "unix":
+		if !strings.HasPrefix(uri, "unix:///") {
+			// autofix unix://path_element vs unix:///path_element
+			_url.Path = JoinURL(_url.Host, _url.Path)
+			_url.Host = ""
+		}
+		connection = unixClient(_url)
+	case "tcp":
+		if !strings.HasPrefix(uri, "tcp://") {
+			return nil, errors.New("tcp URIs should begin with tcp://")
+		}
+		connection = tcpClient(_url)
+	default:
+		return nil, errors.Errorf("unable to create connection. %q is not a supported schema", _url.Scheme)
+	}
+	if err != nil {
+		return nil, errors.Wrapf(err, "unable to connect to Podman. 
failed to create %sClient", _url.Scheme) + } + + ctx = context.WithValue(ctx, clientKey, &connection) + serviceVersion, err := pingNewConnection(ctx) + if err != nil { + return nil, errors.Wrap(err, "unable to connect to Podman socket") + } + ctx = context.WithValue(ctx, versionKey, serviceVersion) + return ctx, nil +} + +func tcpClient(_url *url.URL) Connection { + connection := Connection{ + URI: _url, + } + connection.Client = &http.Client{ + Transport: &http.Transport{ + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + return net.Dial("tcp", _url.Host) + }, + DisableCompression: true, + }, + } + return connection +} + +// pingNewConnection pings to make sure the RESTFUL service is up +// and running. it should only be used when initializing a connection +func pingNewConnection(ctx context.Context) (*semver.Version, error) { + client, err := GetClient(ctx) + if err != nil { + return nil, err + } + // the ping endpoint sits at / in this case + response, err := client.DoRequest(ctx, nil, http.MethodGet, "/_ping", nil, nil) + if err != nil { + return nil, err + } + defer response.Body.Close() + + if response.StatusCode == http.StatusOK { + versionHdr := response.Header.Get("Libpod-API-Version") + if versionHdr == "" { + logrus.Info("Service did not provide Libpod-API-Version Header") + return new(semver.Version), nil + } + versionSrv, err := semver.ParseTolerant(versionHdr) + if err != nil { + return nil, err + } + + switch version.APIVersion[version.Libpod][version.MinimalAPI].Compare(versionSrv) { + case -1, 0: + // Server's job when Client version is equal or older + return &versionSrv, nil + case 1: + return nil, errors.Errorf("server API version is too old. Client %q server %q", + version.APIVersion[version.Libpod][version.MinimalAPI].String(), versionSrv.String()) + } + } + return nil, errors.Errorf("ping response was %d", response.StatusCode) +} + +func sshClient(_url *url.URL, secure bool, passPhrase string, identity string) (Connection, error) { + // if you modify the authmethods or their conditionals, you will also need to make similar + // changes in the client (currently cmd/podman/system/connection/add getUDS). + + var signers []ssh.Signer // order Signers are appended to this list determines which key is presented to server + + if len(identity) > 0 { + s, err := terminal.PublicKey(identity, []byte(passPhrase)) + if err != nil { + return Connection{}, errors.Wrapf(err, "failed to parse identity %q", identity) + } + + signers = append(signers, s) + logrus.Debugf("SSH Ident Key %q %s %s", identity, ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) + } + + if sock, found := os.LookupEnv("SSH_AUTH_SOCK"); found { + logrus.Debugf("Found SSH_AUTH_SOCK %q, ssh-agent signer(s) enabled", sock) + + c, err := net.Dial("unix", sock) + if err != nil { + return Connection{}, err + } + + agentSigners, err := agent.NewClient(c).Signers() + if err != nil { + return Connection{}, err + } + signers = append(signers, agentSigners...) 
+ + if logrus.IsLevelEnabled(logrus.DebugLevel) { + for _, s := range agentSigners { + logrus.Debugf("SSH Agent Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) + } + } + } + + var authMethods []ssh.AuthMethod + if len(signers) > 0 { + var dedup = make(map[string]ssh.Signer) + // Dedup signers based on fingerprint, ssh-agent keys override CONTAINER_SSHKEY + for _, s := range signers { + fp := ssh.FingerprintSHA256(s.PublicKey()) + if _, found := dedup[fp]; found { + logrus.Debugf("Dedup SSH Key %s %s", ssh.FingerprintSHA256(s.PublicKey()), s.PublicKey().Type()) + } + dedup[fp] = s + } + + var uniq []ssh.Signer + for _, s := range dedup { + uniq = append(uniq, s) + } + authMethods = append(authMethods, ssh.PublicKeysCallback(func() ([]ssh.Signer, error) { + return uniq, nil + })) + } + + if pw, found := _url.User.Password(); found { + authMethods = append(authMethods, ssh.Password(pw)) + } + + if len(authMethods) == 0 { + callback := func() (string, error) { + pass, err := terminal.ReadPassword("Login password:") + return string(pass), err + } + authMethods = append(authMethods, ssh.PasswordCallback(callback)) + } + + port := _url.Port() + if port == "" { + port = "22" + } + + callback := ssh.InsecureIgnoreHostKey() + if secure { + host := _url.Hostname() + if port != "22" { + host = fmt.Sprintf("[%s]:%s", host, port) + } + key := terminal.HostKey(host) + if key != nil { + callback = ssh.FixedHostKey(key) + } + } + + bastion, err := ssh.Dial("tcp", + net.JoinHostPort(_url.Hostname(), port), + &ssh.ClientConfig{ + User: _url.User.Username(), + Auth: authMethods, + HostKeyCallback: callback, + HostKeyAlgorithms: []string{ + ssh.KeyAlgoRSA, + ssh.KeyAlgoDSA, + ssh.KeyAlgoECDSA256, + ssh.KeyAlgoECDSA384, + ssh.KeyAlgoECDSA521, + ssh.KeyAlgoED25519, + }, + Timeout: 5 * time.Second, + }, + ) + if err != nil { + return Connection{}, errors.Wrapf(err, "connection to bastion host (%s) failed", _url.String()) + } + + connection := Connection{URI: _url} + connection.Client = &http.Client{ + Transport: &http.Transport{ + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + return bastion.Dial("unix", _url.Path) + }, + }} + return connection, nil +} + +func unixClient(_url *url.URL) Connection { + connection := Connection{URI: _url} + connection.Client = &http.Client{ + Transport: &http.Transport{ + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "unix", _url.Path) + }, + DisableCompression: true, + }, + } + return connection +} + +// DoRequest assembles the http request and returns the response +func (c *Connection) DoRequest(ctx context.Context, httpBody io.Reader, httpMethod, endpoint string, queryParams url.Values, headers http.Header, pathValues ...string) (*APIResponse, error) { + var ( + err error + response *http.Response + ) + + params := make([]interface{}, len(pathValues)+1) + + if v := headers.Values("API-Version"); len(v) > 0 { + params[0] = v[0] + } else { + // Including the semver suffices breaks older services... so do not include them + v := version.APIVersion[version.Libpod][version.CurrentAPI] + params[0] = fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) + } + + for i, pv := range pathValues { + // url.URL lacks the semantics for escaping embedded path parameters... so we manually + // escape each one and assume the caller included the correct formatting in "endpoint" + params[i+1] = url.PathEscape(pv) + } + + uri := fmt.Sprintf("http://d/v%s/libpod"+endpoint, params...) 
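+	// The host "d" in the URI above is a placeholder: the Transport's DialContext
+	// (unix, tcp, or ssh, set up earlier) ignores it and dials the service directly,
+	// so only the path and query portions of the request URL matter.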
+ logrus.Debugf("DoRequest Method: %s URI: %v", httpMethod, uri) + + req, err := http.NewRequestWithContext(ctx, httpMethod, uri, httpBody) + if err != nil { + return nil, err + } + if len(queryParams) > 0 { + req.URL.RawQuery = queryParams.Encode() + } + + for key, val := range headers { + if key == "API-Version" { + continue + } + + for _, v := range val { + req.Header.Add(key, v) + } + } + + // Give the Do three chances in the case of a comm/service hiccup + for i := 1; i <= 3; i++ { + response, err = c.Client.Do(req) // nolint + if err == nil { + break + } + time.Sleep(time.Duration(i*100) * time.Millisecond) + } + return &APIResponse{response, req}, err +} + +// GetDialer returns raw Transport.DialContext from client +func (c *Connection) GetDialer(ctx context.Context) (net.Conn, error) { + client := c.Client + transport := client.Transport.(*http.Transport) + if transport.DialContext != nil && transport.TLSClientConfig == nil { + return transport.DialContext(ctx, c.URI.Scheme, c.URI.String()) + } + + return nil, errors.New("Unable to get dial context") +} + +// IsInformational returns true if the response code is 1xx +func (h *APIResponse) IsInformational() bool { + return h.Response.StatusCode/100 == 1 +} + +// IsSuccess returns true if the response code is 2xx +func (h *APIResponse) IsSuccess() bool { + return h.Response.StatusCode/100 == 2 +} + +// IsRedirection returns true if the response code is 3xx +func (h *APIResponse) IsRedirection() bool { + return h.Response.StatusCode/100 == 3 +} + +// IsClientError returns true if the response code is 4xx +func (h *APIResponse) IsClientError() bool { + return h.Response.StatusCode/100 == 4 +} + +// IsConflictError returns true if the response code is 409 +func (h *APIResponse) IsConflictError() bool { + return h.Response.StatusCode == 409 +} + +// IsServerError returns true if the response code is 5xx +func (h *APIResponse) IsServerError() bool { + return h.Response.StatusCode/100 == 5 +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/archive.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/archive.go new file mode 100644 index 00000000000..4f4b5a36a62 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/archive.go @@ -0,0 +1,110 @@ +package containers + +import ( + "context" + "io" + "net/http" + "net/url" + + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/copy" + "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/pkg/errors" +) + +// Stat checks if the specified path is on the container. Note that the stat +// report may be set even in case of an error. This happens when the path +// resolves to symlink pointing to a non-existent path. 
+func Stat(ctx context.Context, nameOrID string, path string) (*entities.ContainerStatReport, error) { + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params := url.Values{} + params.Set("path", path) + + response, err := conn.DoRequest(ctx, nil, http.MethodHead, "/containers/%s/archive", params, nil, nameOrID) + if err != nil { + return nil, err + } + defer response.Body.Close() + + var finalErr error + if response.StatusCode == http.StatusNotFound { + finalErr = copy.ErrENOENT + } else if response.StatusCode != http.StatusOK { + finalErr = errors.New(response.Status) + } + + var statReport *entities.ContainerStatReport + + fileInfo, err := copy.ExtractFileInfoFromHeader(&response.Header) + if err != nil && finalErr == nil { + return nil, err + } + + if fileInfo != nil { + statReport = &entities.ContainerStatReport{FileInfo: *fileInfo} + } + + return statReport, finalErr +} + +func CopyFromArchive(ctx context.Context, nameOrID string, path string, reader io.Reader) (entities.ContainerCopyFunc, error) { + return CopyFromArchiveWithOptions(ctx, nameOrID, path, reader, nil) +} + +// CopyFromArchiveWithOptions copy files into container +// +// FIXME: remove this function and make CopyFromArchive accept the option as the last parameter in podman 4.0 +func CopyFromArchiveWithOptions(ctx context.Context, nameOrID string, path string, reader io.Reader, options *CopyOptions) (entities.ContainerCopyFunc, error) { + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + + params, err := options.ToParams() + if err != nil { + return nil, err + } + + params.Set("path", path) + + return func() error { + response, err := conn.DoRequest(ctx, reader, http.MethodPut, "/containers/%s/archive", params, nil, nameOrID) + if err != nil { + return err + } + + if response.StatusCode != http.StatusOK { + return errors.New(response.Status) + } + return response.Process(nil) + }, nil +} + +// CopyToArchive copy files from container +func CopyToArchive(ctx context.Context, nameOrID string, path string, writer io.Writer) (entities.ContainerCopyFunc, error) { + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params := url.Values{} + params.Set("path", path) + + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/containers/%s/archive", params, nil, nameOrID) + if err != nil { + return nil, err + } + + if response.StatusCode != http.StatusOK { + defer response.Body.Close() + return nil, response.Process(nil) + } + + return func() error { + defer response.Body.Close() + _, err := io.Copy(writer, response.Body) + return err + }, nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go new file mode 100644 index 00000000000..d84b4705215 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go @@ -0,0 +1,572 @@ +package containers + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "reflect" + "strconv" + "time" + + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/utils" + "github.com/moby/term" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + terminal "golang.org/x/term" +) + +// The CloseWriter interface is used to determine whether we can do a one-sided +// close of a hijacked connection. 
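+// Connections whose concrete type is *net.TCPConn or *net.UnixConn satisfy it
+// via their CloseWrite methods.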
+type CloseWriter interface { + CloseWrite() error +} + +// Attach attaches to a running container +func Attach(ctx context.Context, nameOrID string, stdin io.Reader, stdout io.Writer, stderr io.Writer, attachReady chan bool, options *AttachOptions) error { + if options == nil { + options = new(AttachOptions) + } + isSet := struct { + stdin bool + stdout bool + stderr bool + }{ + stdin: !(stdin == nil || reflect.ValueOf(stdin).IsNil()), + stdout: !(stdout == nil || reflect.ValueOf(stdout).IsNil()), + stderr: !(stderr == nil || reflect.ValueOf(stderr).IsNil()), + } + // Ensure golang can determine that interfaces are "really" nil + if !isSet.stdin { + stdin = (io.Reader)(nil) + } + if !isSet.stdout { + stdout = (io.Writer)(nil) + } + if !isSet.stderr { + stderr = (io.Writer)(nil) + } + + logrus.Infof("Going to attach to container %q", nameOrID) + + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + + // Do we need to wire in stdin? + ctnr, err := Inspect(ctx, nameOrID, new(InspectOptions).WithSize(false)) + if err != nil { + return err + } + + params, err := options.ToParams() + if err != nil { + return err + } + detachKeysInBytes := []byte{} + if options.Changed("DetachKeys") { + params.Add("detachKeys", options.GetDetachKeys()) + + detachKeysInBytes, err = term.ToBytes(options.GetDetachKeys()) + if err != nil { + return errors.Wrapf(err, "invalid detach keys") + } + } + if isSet.stdin { + params.Add("stdin", "true") + } + if isSet.stdout { + params.Add("stdout", "true") + } + if isSet.stderr { + params.Add("stderr", "true") + } + + // Unless all requirements are met, don't use "stdin" is a terminal + file, ok := stdin.(*os.File) + outFile, outOk := stdout.(*os.File) + needTTY := ok && outOk && terminal.IsTerminal(int(file.Fd())) && ctnr.Config.Tty + if needTTY { + state, err := setRawTerminal(file) + if err != nil { + return err + } + defer func() { + if err := terminal.Restore(int(file.Fd()), state); err != nil { + logrus.Errorf("Unable to restore terminal: %q", err) + } + logrus.SetFormatter(&logrus.TextFormatter{}) + }() + } + + headers := make(http.Header) + headers.Add("Connection", "Upgrade") + headers.Add("Upgrade", "tcp") + + var socket net.Conn + socketSet := false + dialContext := conn.Client.Transport.(*http.Transport).DialContext + t := &http.Transport{ + DialContext: func(ctx context.Context, network, address string) (net.Conn, error) { + c, err := dialContext(ctx, network, address) + if err != nil { + return nil, err + } + if !socketSet { + socket = c + socketSet = true + } + return c, err + }, + IdleConnTimeout: time.Duration(0), + } + conn.Client.Transport = t + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/attach", params, headers, nameOrID) + if err != nil { + return err + } + + if !(response.IsSuccess() || response.IsInformational()) { + defer response.Body.Close() + return response.Process(nil) + } + + if needTTY { + winChange := make(chan os.Signal, 1) + winCtx, winCancel := context.WithCancel(ctx) + defer winCancel() + notifyWinChange(winCtx, winChange, file, outFile) + attachHandleResize(ctx, winCtx, winChange, false, nameOrID, file, outFile) + } + + // If we are attaching around a start, we need to "signal" + // back that we are in fact attached so that started does + // not execute before we can attach. 
+ if attachReady != nil { + attachReady <- true + } + + stdoutChan := make(chan error) + stdinChan := make(chan error, 1) // stdin channel should not block + + if isSet.stdin { + go func() { + logrus.Debugf("Copying STDIN to socket") + + _, err := utils.CopyDetachable(socket, stdin, detachKeysInBytes) + if err != nil && err != define.ErrDetach { + logrus.Errorf("Failed to write input to service: %v", err) + } + if err == nil { + if closeWrite, ok := socket.(CloseWriter); ok { + if err := closeWrite.CloseWrite(); err != nil { + logrus.Warnf("Failed to close STDIN for writing: %v", err) + } + } + } + stdinChan <- err + }() + } + + buffer := make([]byte, 1024) + if ctnr.Config.Tty { + go func() { + logrus.Debugf("Copying STDOUT of container in terminal mode") + + if !isSet.stdout { + stdoutChan <- fmt.Errorf("container %q requires stdout to be set", ctnr.ID) + } + // If not multiplex'ed, read from server and write to stdout + _, err := io.Copy(stdout, socket) + + stdoutChan <- err + }() + + for { + select { + case err := <-stdoutChan: + if err != nil { + return err + } + + return nil + case err := <-stdinChan: + if err != nil { + return err + } + + return nil + } + } + } else { + logrus.Debugf("Copying standard streams of container %q in non-terminal mode", ctnr.ID) + for { + // Read multiplexed channels and write to appropriate stream + fd, l, err := DemuxHeader(socket, buffer) + if err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return nil + } + return err + } + frame, err := DemuxFrame(socket, buffer, l) + if err != nil { + return err + } + + switch { + case fd == 0: + if isSet.stdout { + if _, err := stdout.Write(frame[0:l]); err != nil { + return err + } + } + case fd == 1: + if isSet.stdout { + if _, err := stdout.Write(frame[0:l]); err != nil { + return err + } + } + case fd == 2: + if isSet.stderr { + if _, err := stderr.Write(frame[0:l]); err != nil { + return err + } + } + case fd == 3: + return fmt.Errorf("from service from stream: %s", frame) + default: + return fmt.Errorf("unrecognized channel '%d' in header, 0-3 supported", fd) + } + } + } +} + +// DemuxHeader reads header for stream from server multiplexed stdin/stdout/stderr/2nd error channel +func DemuxHeader(r io.Reader, buffer []byte) (fd, sz int, err error) { + n, err := io.ReadFull(r, buffer[0:8]) + if err != nil { + return + } + if n < 8 { + err = io.ErrUnexpectedEOF + return + } + + fd = int(buffer[0]) + if fd < 0 || fd > 3 { + err = errors.Wrapf(ErrLostSync, fmt.Sprintf(`channel "%d" found, 0-3 supported`, fd)) + return + } + + sz = int(binary.BigEndian.Uint32(buffer[4:8])) + return +} + +// DemuxFrame reads contents for frame from server multiplexed stdin/stdout/stderr/2nd error channel +func DemuxFrame(r io.Reader, buffer []byte, length int) (frame []byte, err error) { + if len(buffer) < length { + buffer = append(buffer, make([]byte, length-len(buffer)+1)...) 
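+		// The buffer is grown in place so the io.ReadFull below can read the whole frame.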
+ } + + n, err := io.ReadFull(r, buffer[0:length]) + if err != nil { + return nil, err + } + if n < length { + err = io.ErrUnexpectedEOF + return + } + + return buffer[0:length], nil +} + +// ResizeContainerTTY sets container's TTY height and width in characters +func ResizeContainerTTY(ctx context.Context, nameOrID string, options *ResizeTTYOptions) error { + if options == nil { + options = new(ResizeTTYOptions) + } + return resizeTTY(ctx, bindings.JoinURL("containers", nameOrID, "resize"), options.Height, options.Width) +} + +// ResizeExecTTY sets session's TTY height and width in characters +func ResizeExecTTY(ctx context.Context, nameOrID string, options *ResizeExecTTYOptions) error { + if options == nil { + options = new(ResizeExecTTYOptions) + } + return resizeTTY(ctx, bindings.JoinURL("exec", nameOrID, "resize"), options.Height, options.Width) +} + +// resizeTTY set size of TTY of container +func resizeTTY(ctx context.Context, endpoint string, height *int, width *int) error { + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + + params := url.Values{} + if height != nil { + params.Set("h", strconv.Itoa(*height)) + } + if width != nil { + params.Set("w", strconv.Itoa(*width)) + } + params.Set("running", "true") + rsp, err := conn.DoRequest(ctx, nil, http.MethodPost, endpoint, params, nil) + if err != nil { + return err + } + defer rsp.Body.Close() + + return rsp.Process(nil) +} + +type rawFormatter struct { + logrus.TextFormatter +} + +func (f *rawFormatter) Format(entry *logrus.Entry) ([]byte, error) { + buffer, err := f.TextFormatter.Format(entry) + if err != nil { + return buffer, err + } + return append(buffer, '\r'), nil +} + +// This is intended to not be run as a goroutine, handling resizing for a container +// or exec session. It will call resize once and then starts a goroutine which calls resize on winChange +func attachHandleResize(ctx, winCtx context.Context, winChange chan os.Signal, isExec bool, id string, file *os.File, outFile *os.File) { + resize := func() { + w, h, err := getTermSize(file, outFile) + if err != nil { + logrus.Warnf("Failed to obtain TTY size: %v", err) + } + + var resizeErr error + if isExec { + resizeErr = ResizeExecTTY(ctx, id, new(ResizeExecTTYOptions).WithHeight(h).WithWidth(w)) + } else { + resizeErr = ResizeContainerTTY(ctx, id, new(ResizeTTYOptions).WithHeight(h).WithWidth(w)) + } + if resizeErr != nil { + logrus.Infof("Failed to resize TTY: %v", resizeErr) + } + } + + resize() + + go func() { + for { + select { + case <-winCtx.Done(): + return + case <-winChange: + resize() + } + } + }() +} + +// Configure the given terminal for raw mode +func setRawTerminal(file *os.File) (*terminal.State, error) { + state, err := makeRawTerm(file) + if err != nil { + return nil, err + } + + logrus.SetFormatter(&rawFormatter{}) + + return state, err +} + +// ExecStartAndAttach starts and attaches to a given exec session. +func ExecStartAndAttach(ctx context.Context, sessionID string, options *ExecStartAndAttachOptions) error { + if options == nil { + options = new(ExecStartAndAttachOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + + // TODO: Make this configurable (can't use streams' InputStream as it's + // buffered) + terminalFile := os.Stdin + terminalOutFile := os.Stdout + + logrus.Debugf("Starting & Attaching to exec session ID %q", sessionID) + + // We need to inspect the exec session first to determine whether to use + // -t. 
+ resp, err := conn.DoRequest(ctx, nil, http.MethodGet, "/exec/%s/json", nil, nil, sessionID) + if err != nil { + return err + } + defer resp.Body.Close() + + respStruct := new(define.InspectExecSession) + if err := resp.Process(respStruct); err != nil { + return err + } + isTerm := true + if respStruct.ProcessConfig != nil { + isTerm = respStruct.ProcessConfig.Tty + } + + // If we are in TTY mode, we need to set raw mode for the terminal. + // TODO: Share all of this with Attach() for containers. + needTTY := terminalFile != nil && terminal.IsTerminal(int(terminalFile.Fd())) && isTerm + + body := struct { + Detach bool `json:"Detach"` + TTY bool `json:"Tty"` + Height uint16 `json:"h"` + Width uint16 `json:"w"` + }{ + Detach: false, + TTY: needTTY, + } + + if needTTY { + state, err := setRawTerminal(terminalFile) + if err != nil { + return err + } + defer func() { + if err := terminal.Restore(int(terminalFile.Fd()), state); err != nil { + logrus.Errorf("Unable to restore terminal: %q", err) + } + logrus.SetFormatter(&logrus.TextFormatter{}) + }() + w, h, err := getTermSize(terminalFile, terminalOutFile) + if err != nil { + logrus.Warnf("Failed to obtain TTY size: %v", err) + } + body.Width = uint16(w) + body.Height = uint16(h) + } + + bodyJSON, err := json.Marshal(body) + if err != nil { + return err + } + + var socket net.Conn + socketSet := false + dialContext := conn.Client.Transport.(*http.Transport).DialContext + t := &http.Transport{ + DialContext: func(ctx context.Context, network, address string) (net.Conn, error) { + c, err := dialContext(ctx, network, address) + if err != nil { + return nil, err + } + if !socketSet { + socket = c + socketSet = true + } + return c, err + }, + IdleConnTimeout: time.Duration(0), + } + conn.Client.Transport = t + response, err := conn.DoRequest(ctx, bytes.NewReader(bodyJSON), http.MethodPost, "/exec/%s/start", nil, nil, sessionID) + if err != nil { + return err + } + defer response.Body.Close() + + if !(response.IsSuccess() || response.IsInformational()) { + return response.Process(nil) + } + + if needTTY { + winChange := make(chan os.Signal, 1) + winCtx, winCancel := context.WithCancel(ctx) + defer winCancel() + + notifyWinChange(winCtx, winChange, terminalFile, terminalOutFile) + attachHandleResize(ctx, winCtx, winChange, true, sessionID, terminalFile, terminalOutFile) + } + + if options.GetAttachInput() { + go func() { + logrus.Debugf("Copying STDIN to socket") + _, err := utils.CopyDetachable(socket, options.InputStream, []byte{}) + if err != nil { + logrus.Errorf("Failed to write input to service: %v", err) + } + + if closeWrite, ok := socket.(CloseWriter); ok { + logrus.Debugf("Closing STDIN") + if err := closeWrite.CloseWrite(); err != nil { + logrus.Warnf("Failed to close STDIN for writing: %v", err) + } + } + }() + } + + buffer := make([]byte, 1024) + if isTerm { + logrus.Debugf("Handling terminal attach to exec") + if !options.GetAttachOutput() { + return fmt.Errorf("exec session %s has a terminal and must have STDOUT enabled", sessionID) + } + // If not multiplex'ed, read from server and write to stdout + _, err := utils.CopyDetachable(options.GetOutputStream(), socket, []byte{}) + if err != nil { + return err + } + } else { + logrus.Debugf("Handling non-terminal attach to exec") + for { + // Read multiplexed channels and write to appropriate stream + fd, l, err := DemuxHeader(socket, buffer) + if err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return nil + } + return err + } + frame, err := 
DemuxFrame(socket, buffer, l) + if err != nil { + return err + } + + switch { + case fd == 0: + if options.GetAttachInput() { + // Write STDIN to STDOUT (echoing characters + // typed by another attach session) + if _, err := options.GetOutputStream().Write(frame[0:l]); err != nil { + return err + } + } + case fd == 1: + if options.GetAttachOutput() { + if _, err := options.GetOutputStream().Write(frame[0:l]); err != nil { + return err + } + } + case fd == 2: + if options.GetAttachError() { + if _, err := options.GetErrorStream().Write(frame[0:l]); err != nil { + return err + } + } + case fd == 3: + return fmt.Errorf("from service from stream: %s", frame) + default: + return fmt.Errorf("unrecognized channel '%d' in header, 0-3 supported", fd) + } + } + } + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/checkpoint.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/checkpoint.go new file mode 100644 index 00000000000..bcb94448860 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/checkpoint.go @@ -0,0 +1,106 @@ +package containers + +import ( + "context" + "io" + "net/http" + "os" + + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/domain/entities" +) + +// Checkpoint checkpoints the given container (identified by nameOrID). All additional +// options are options and allow for more fine grained control of the checkpoint process. +func Checkpoint(ctx context.Context, nameOrID string, options *CheckpointOptions) (*entities.CheckpointReport, error) { + var report entities.CheckpointReport + if options == nil { + options = new(CheckpointOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + + // "export" is a bool for the server so override it in the parameters + // if set. + export := false + if options.Export != nil && *options.Export != "" { + export = true + params.Set("export", "true") + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/checkpoint", params, nil, nameOrID) + if err != nil { + return nil, err + } + defer response.Body.Close() + + if !export { + return &report, response.Process(&report) + } + + f, err := os.OpenFile(*options.Export, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return nil, err + } + defer f.Close() + if _, err := io.Copy(f, response.Body); err != nil { + return nil, err + } + + return &entities.CheckpointReport{}, nil +} + +// Restore restores a checkpointed container to running. The container is identified by the nameOrID option. All +// additional options are optional and allow finer control of the restore process. +func Restore(ctx context.Context, nameOrID string, options *RestoreOptions) (*entities.RestoreReport, error) { + var report entities.RestoreReport + if options == nil { + options = new(RestoreOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + + for _, p := range options.PublishPorts { + params.Add("publishPorts", p) + } + + params.Del("ImportArchive") // The import key is a reserved golang term + + // Open the to-be-imported archive if needed. 
+ var r io.Reader + i := options.GetImportArchive() + if i == "" { + // backwards compat, ImportAchive is a typo but we still have to + // support this to avoid breaking users + // TODO: remove ImportAchive with 5.0 + i = options.GetImportAchive() + } + if i != "" { + params.Set("import", "true") + r, err = os.Open(i) + if err != nil { + return nil, err + } + // Hard-code the name since it will be ignored in any case. + nameOrID = "import" + } + + response, err := conn.DoRequest(ctx, r, http.MethodPost, "/containers/%s/restore", params, nil, nameOrID) + if err != nil { + return nil, err + } + defer response.Body.Close() + + return &report, response.Process(&report) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/commit.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/commit.go new file mode 100644 index 00000000000..1a85bfc3807 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/commit.go @@ -0,0 +1,34 @@ +package containers + +import ( + "context" + "net/http" + + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/domain/entities" +) + +// Commit creates a container image from a container. The container is defined by nameOrID. Use +// the CommitOptions for finer grain control on characteristics of the resulting image. +func Commit(ctx context.Context, nameOrID string, options *CommitOptions) (entities.IDResponse, error) { + if options == nil { + options = new(CommitOptions) + } + id := entities.IDResponse{} + conn, err := bindings.GetClient(ctx) + if err != nil { + return id, err + } + params, err := options.ToParams() + if err != nil { + return entities.IDResponse{}, err + } + params.Set("container", nameOrID) + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/commit", params, nil) + if err != nil { + return id, err + } + defer response.Body.Close() + + return id, response.Process(&id) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/containers.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/containers.go new file mode 100644 index 00000000000..be421cc8b95 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/containers.go @@ -0,0 +1,473 @@ +package containers + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" + + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/api/handlers" + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v4/pkg/domain/entities/reports" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + ErrLostSync = errors.New("lost synchronization with multiplexed stream") +) + +// List obtains a list of containers in local storage. All parameters to this method are optional. +// The filters are used to determine which containers are listed. The last parameter indicates to only return +// the most recent number of containers. The pod and size booleans indicate that pod information and rootfs +// size information should also be included. Finally, the sync bool synchronizes the OCI runtime and +// container state. 
+func List(ctx context.Context, options *ListOptions) ([]entities.ListContainer, error) { // nolint:typecheck + if options == nil { + options = new(ListOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + var containers []entities.ListContainer + params, err := options.ToParams() + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/containers/json", params, nil) + if err != nil { + return containers, err + } + defer response.Body.Close() + + return containers, response.Process(&containers) +} + +// Prune removes stopped and exited containers from local storage. The optional filters can be +// used for more granular selection of containers. The main error returned indicates if there were runtime +// errors like finding containers. Errors specific to the removal of a container are in the PruneContainerResponse +// structure. +func Prune(ctx context.Context, options *PruneOptions) ([]*reports.PruneReport, error) { + if options == nil { + options = new(PruneOptions) + } + var reports []*reports.PruneReport + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/prune", params, nil) + if err != nil { + return nil, err + } + defer response.Body.Close() + + return reports, response.Process(&reports) +} + +// Remove removes a container from local storage. The force bool designates +// that the container should be removed forcibly (example, even it is running). +// The volumes bool dictates that a container's volumes should also be removed. +// The All option indicates that all containers should be removed +// The Ignore option indicates that if a container did not exist, ignore the error +func Remove(ctx context.Context, nameOrID string, options *RemoveOptions) ([]*reports.RmReport, error) { + if options == nil { + options = new(RemoveOptions) + } + var reports []*reports.RmReport + conn, err := bindings.GetClient(ctx) + if err != nil { + return reports, err + } + params, err := options.ToParams() + if err != nil { + return reports, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodDelete, "/containers/%s", params, nil, nameOrID) + if err != nil { + return reports, err + } + defer response.Body.Close() + + return reports, response.Process(&reports) +} + +// Inspect returns low level information about a Container. The nameOrID can be a container name +// or a partial/full ID. The size bool determines whether the size of the container's root filesystem +// should be calculated. Calculating the size of a container requires extra work from the filesystem and +// is therefore slower. +func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*define.InspectContainerData, error) { + if options == nil { + options = new(InspectOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/containers/%s/json", params, nil, nameOrID) + if err != nil { + return nil, err + } + defer response.Body.Close() + + inspect := define.InspectContainerData{} + return &inspect, response.Process(&inspect) +} + +// Kill sends a given signal to a given container. The signal should be the string +// representation of a signal like 'SIGKILL'. 
The nameOrID can be a container name +// or a partial/full ID +func Kill(ctx context.Context, nameOrID string, options *KillOptions) error { + if options == nil { + options = new(KillOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + params, err := options.ToParams() + if err != nil { + return err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/kill", params, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + return response.Process(nil) +} + +// Pause pauses a given container. The nameOrID can be a container name +// or a partial/full ID. +func Pause(ctx context.Context, nameOrID string, options *PauseOptions) error { + if options == nil { + options = new(PauseOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/pause", nil, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + return response.Process(nil) +} + +// Restart restarts a running container. The nameOrID can be a container name +// or a partial/full ID. The optional timeout specifies the number of seconds to wait +// for the running container to stop before killing it. +func Restart(ctx context.Context, nameOrID string, options *RestartOptions) error { + if options == nil { + options = new(RestartOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + params, err := options.ToParams() + if err != nil { + return err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/restart", params, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + return response.Process(nil) +} + +// Start starts a non-running container.The nameOrID can be a container name +// or a partial/full ID. The optional parameter for detach keys are to override the default +// detach key sequence. 
+func Start(ctx context.Context, nameOrID string, options *StartOptions) error { + if options == nil { + options = new(StartOptions) + } + logrus.Infof("Going to start container %q", nameOrID) + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + params, err := options.ToParams() + if err != nil { + return err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/start", params, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + return response.Process(nil) +} + +func Stats(ctx context.Context, containers []string, options *StatsOptions) (chan entities.ContainerStatsReport, error) { + if options == nil { + options = new(StatsOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + for _, c := range containers { + params.Add("containers", c) + } + + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/containers/stats", params, nil) + if err != nil { + return nil, err + } + if !response.IsSuccess() { + return nil, response.Process(nil) + } + + statsChan := make(chan entities.ContainerStatsReport) + + go func() { + defer close(statsChan) + defer response.Body.Close() + + dec := json.NewDecoder(response.Body) + doStream := true + if options.Changed("Stream") { + doStream = options.GetStream() + } + + streamLabel: // label to flatten the scope + select { + case <-response.Request.Context().Done(): + return // lost connection - maybe the server quit + default: + // fall through and do some work + } + + var report entities.ContainerStatsReport + if err := dec.Decode(&report); err != nil { + report = entities.ContainerStatsReport{Error: err} + } + statsChan <- report + + if report.Error != nil || !doStream { + return + } + goto streamLabel + }() + + return statsChan, nil +} + +// Top gathers statistics about the running processes in a container. The nameOrID can be a container name +// or a partial/full ID. The descriptors allow for specifying which data to collect from the process. +func Top(ctx context.Context, nameOrID string, options *TopOptions) ([]string, error) { + if options == nil { + options = new(TopOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params := url.Values{} + if options.Changed("Descriptors") { + psArgs := strings.Join(options.GetDescriptors(), ",") + params.Add("ps_args", psArgs) + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/containers/%s/top", params, nil, nameOrID) + if err != nil { + return nil, err + } + defer response.Body.Close() + + body := handlers.ContainerTopOKBody{} + if err = response.Process(&body); err != nil { + return nil, err + } + + // handlers.ContainerTopOKBody{} returns a slice of slices where each cell in the top table is an item. + // In libpod land, we're just using a slice with cells being split by tabs, which allows for an idiomatic + // usage of the tabwriter. + topOutput := []string{strings.Join(body.Titles, "\t")} + for _, out := range body.Processes { + topOutput = append(topOutput, strings.Join(out, "\t")) + } + + return topOutput, err +} + +// Unpause resumes the given paused container. The nameOrID can be a container name +// or a partial/full ID. 
+func Unpause(ctx context.Context, nameOrID string, options *UnpauseOptions) error { + if options == nil { + options = new(UnpauseOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/unpause", nil, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + return response.Process(nil) +} + +// Wait blocks until the given container reaches a condition. If not provided, the condition will +// default to stopped. If the condition is stopped, an exit code for the container will be provided. The +// nameOrID can be a container name or a partial/full ID. +func Wait(ctx context.Context, nameOrID string, options *WaitOptions) (int32, error) { // nolint + if options == nil { + options = new(WaitOptions) + } + var exitCode int32 + conn, err := bindings.GetClient(ctx) + if err != nil { + return exitCode, err + } + params, err := options.ToParams() + if err != nil { + return exitCode, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/wait", params, nil, nameOrID) + if err != nil { + return exitCode, err + } + defer response.Body.Close() + + return exitCode, response.Process(&exitCode) +} + +// Exists is a quick, light-weight way to determine if a given container +// exists in local storage. The nameOrID can be a container name +// or a partial/full ID. +func Exists(ctx context.Context, nameOrID string, options *ExistsOptions) (bool, error) { + conn, err := bindings.GetClient(ctx) + if err != nil { + return false, err + } + params, err := options.ToParams() + if err != nil { + return false, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/containers/%s/exists", params, nil, nameOrID) + if err != nil { + return false, err + } + defer response.Body.Close() + + return response.IsSuccess(), nil +} + +// Stop stops a running container. The timeout is optional. The nameOrID can be a container name +// or a partial/full ID +func Stop(ctx context.Context, nameOrID string, options *StopOptions) error { + if options == nil { + options = new(StopOptions) + } + params, err := options.ToParams() + if err != nil { + return err + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/stop", params, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + return response.Process(nil) +} + +// Export creates a tarball of the given name or ID of a container. It +// requires an io.Writer be provided to write the tarball. 
+func Export(ctx context.Context, nameOrID string, w io.Writer, options *ExportOptions) error { + if options == nil { + options = new(ExportOptions) + } + _ = options + params := url.Values{} + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/containers/%s/export", params, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + if response.StatusCode/100 == 2 { + _, err = io.Copy(w, response.Body) + return err + } + return response.Process(nil) +} + +// ContainerInit takes a created container and executes all of the +// preparations to run the container except it will not start +// or attach to the container +func ContainerInit(ctx context.Context, nameOrID string, options *InitOptions) error { + if options == nil { + options = new(InitOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/init", nil, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + if response.StatusCode == http.StatusNotModified { + return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", nameOrID) + } + return response.Process(nil) +} + +func ShouldRestart(ctx context.Context, nameOrID string, options *ShouldRestartOptions) (bool, error) { + if options == nil { + options = new(ShouldRestartOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return false, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/shouldrestart", nil, nil, nameOrID) + if err != nil { + return false, err + } + defer response.Body.Close() + + return response.IsSuccess(), nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/create.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/create.go new file mode 100644 index 00000000000..9c090f67d4f --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/create.go @@ -0,0 +1,36 @@ +package containers + +import ( + "context" + "net/http" + "strings" + + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v4/pkg/specgen" + jsoniter "github.com/json-iterator/go" +) + +func CreateWithSpec(ctx context.Context, s *specgen.SpecGenerator, options *CreateOptions) (entities.ContainerCreateResponse, error) { + var ccr entities.ContainerCreateResponse + if options == nil { + options = new(CreateOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return ccr, err + } + specgenString, err := jsoniter.MarshalToString(s) + if err != nil { + return ccr, err + } + stringReader := strings.NewReader(specgenString) + response, err := conn.DoRequest(ctx, stringReader, http.MethodPost, "/containers/create", nil, nil) + if err != nil { + return ccr, err + } + defer response.Body.Close() + + return ccr, response.Process(&ccr) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/diff.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/diff.go new file mode 100644 index 00000000000..2eb13088c15 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/diff.go @@ -0,0 +1,33 @@ +package containers + +import ( + "context" + "net/http" + + "github.com/containers/podman/v4/pkg/bindings" + 
"github.com/containers/storage/pkg/archive" +) + +// Diff provides the changes between two container layers +func Diff(ctx context.Context, nameOrID string, options *DiffOptions) ([]archive.Change, error) { + if options == nil { + options = new(DiffOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + + params, err := options.ToParams() + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/containers/%s/changes", params, nil, nameOrID) + if err != nil { + return nil, err + } + defer response.Body.Close() + + var changes []archive.Change + return changes, response.Process(&changes) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/exec.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/exec.go new file mode 100644 index 00000000000..3ad5d67d24d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/exec.go @@ -0,0 +1,112 @@ +package containers + +import ( + "bytes" + "context" + "net/http" + "strings" + + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/api/handlers" + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/domain/entities" + jsoniter "github.com/json-iterator/go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var json = jsoniter.ConfigCompatibleWithStandardLibrary + +// ExecCreate creates a new exec session in an existing container. +// The exec session will not be started; that is done with ExecStart. +// Returns ID of new exec session, or an error if one occurred. +func ExecCreate(ctx context.Context, nameOrID string, config *handlers.ExecCreateConfig) (string, error) { + conn, err := bindings.GetClient(ctx) + if err != nil { + return "", err + } + + if config == nil { + return "", errors.Errorf("must provide a configuration for exec session") + } + + requestJSON, err := json.Marshal(config) + if err != nil { + return "", errors.Wrapf(err, "error marshalling exec config to JSON") + } + jsonReader := strings.NewReader(string(requestJSON)) + + resp, err := conn.DoRequest(ctx, jsonReader, http.MethodPost, "/containers/%s/exec", nil, nil, nameOrID) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respStruct := new(entities.IDResponse) + if err := resp.Process(respStruct); err != nil { + return "", err + } + + return respStruct.ID, nil +} + +// ExecInspect inspects an existing exec session, returning detailed information +// about it. +func ExecInspect(ctx context.Context, sessionID string, options *ExecInspectOptions) (*define.InspectExecSession, error) { + if options == nil { + options = new(ExecInspectOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + + logrus.Debugf("Inspecting session ID %q", sessionID) + + resp, err := conn.DoRequest(ctx, nil, http.MethodGet, "/exec/%s/json", nil, nil, sessionID) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + respStruct := new(define.InspectExecSession) + if err := resp.Process(respStruct); err != nil { + return nil, err + } + + return respStruct, nil +} + +// ExecStart starts (but does not attach to) a given exec session. 
+func ExecStart(ctx context.Context, sessionID string, options *ExecStartOptions) error { + if options == nil { + options = new(ExecStartOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + + logrus.Debugf("Starting exec session ID %q", sessionID) + + // We force Detach to true + body := struct { + Detach bool `json:"Detach"` + }{ + Detach: true, + } + bodyJSON, err := json.Marshal(body) + if err != nil { + return err + } + + resp, err := conn.DoRequest(ctx, bytes.NewReader(bodyJSON), http.MethodPost, "/exec/%s/start", nil, nil, sessionID) + if err != nil { + return err + } + defer resp.Body.Close() + + return resp.Process(nil) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/healthcheck.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/healthcheck.go new file mode 100644 index 00000000000..e0680238d42 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/healthcheck.go @@ -0,0 +1,32 @@ +package containers + +import ( + "context" + "net/http" + + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/bindings" +) + +// RunHealthCheck executes the container's healthcheck and returns the health status of the +// container. +func RunHealthCheck(ctx context.Context, nameOrID string, options *HealthCheckOptions) (*define.HealthCheckResults, error) { + if options == nil { + options = new(HealthCheckOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + var ( + status define.HealthCheckResults + ) + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/containers/%s/healthcheck", nil, nil, nameOrID) + if err != nil { + return nil, err + } + defer response.Body.Close() + + return &status, response.Process(&status) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/logs.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/logs.go new file mode 100644 index 00000000000..8ea8ed7faea --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/logs.go @@ -0,0 +1,65 @@ +package containers + +import ( + "context" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/containers/podman/v4/pkg/bindings" + "github.com/pkg/errors" +) + +// Logs obtains a container's logs given the options provided. The logs are then sent to the +// stdout|stderr channels as strings. +func Logs(ctx context.Context, nameOrID string, options *LogOptions, stdoutChan, stderrChan chan string) error { + if options == nil { + options = new(LogOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + params, err := options.ToParams() + if err != nil { + return err + } + // The API requires either stdout|stderr be used. 
If neither is specified, we default to stdout + if options.Stdout == nil && options.Stderr == nil { + params.Set("stdout", strconv.FormatBool(true)) + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/containers/%s/logs", params, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + buffer := make([]byte, 1024) + for { + fd, l, err := DemuxHeader(response.Body, buffer) + if err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return nil + } + return err + } + frame, err := DemuxFrame(response.Body, buffer, l) + if err != nil { + return err + } + + switch fd { + case 0: + stdoutChan <- string(frame) + case 1: + stdoutChan <- string(frame) + case 2: + stderrChan <- string(frame) + case 3: + return errors.New("from service in stream: " + string(frame)) + default: + return fmt.Errorf("unrecognized input header: %d", fd) + } + } +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/mount.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/mount.go new file mode 100644 index 00000000000..de286e4b4d1 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/mount.go @@ -0,0 +1,71 @@ +package containers + +import ( + "context" + "net/http" + + "github.com/containers/podman/v4/pkg/bindings" +) + +// Mount mounts an existing container to the filesystem. It returns the path +// of the mounted container in string format. +func Mount(ctx context.Context, nameOrID string, options *MountOptions) (string, error) { + if options == nil { + options = new(MountOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return "", err + } + var ( + path string + ) + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/mount", nil, nil, nameOrID) + if err != nil { + return path, err + } + defer response.Body.Close() + + return path, response.Process(&path) +} + +// Unmount unmounts a container from the filesystem. The container must not be running +// or the unmount will fail. +func Unmount(ctx context.Context, nameOrID string, options *UnmountOptions) error { + if options == nil { + options = new(UnmountOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/unmount", nil, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + return response.Process(nil) +} + +// GetMountedContainerPaths returns a map of mounted containers and their mount locations.
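Mount, Unmount, and GetMountedContainerPaths below cover the mount lifecycle. A short sketch (editor's illustration, not vendored code); mounting generally requires rootful Podman, the container name is a placeholder, and Unmount fails while the container is running:

package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings"
	"github.com/containers/podman/v4/pkg/bindings/containers"
)

func main() {
	conn, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}

	// Mount returns the host path where the container's filesystem is mounted.
	path, err := containers.Mount(conn, "demo", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("mounted at:", path)

	// GetMountedContainerPaths reports every mounted container and its location.
	mounts, err := containers.GetMountedContainerPaths(conn, nil)
	if err != nil {
		panic(err)
	}
	for id, p := range mounts {
		fmt.Println(id, "=>", p)
	}

	// Unmount once done; the container must not be running.
	if err := containers.Unmount(conn, "demo", nil); err != nil {
		panic(err)
	}
}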
+func GetMountedContainerPaths(ctx context.Context, options *MountedContainerPathsOptions) (map[string]string, error) { + if options == nil { + options = new(MountedContainerPathsOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + mounts := make(map[string]string) + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/containers/showmounted", nil, nil) + if err != nil { + return mounts, err + } + defer response.Body.Close() + + return mounts, response.Process(&mounts) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/rename.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/rename.go new file mode 100644 index 00000000000..7cc16e33439 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/rename.go @@ -0,0 +1,30 @@ +package containers + +import ( + "context" + "net/http" + + "github.com/containers/podman/v4/pkg/bindings" +) + +// Rename an existing container. +func Rename(ctx context.Context, nameOrID string, options *RenameOptions) error { + if options == nil { + options = new(RenameOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + params, err := options.ToParams() + if err != nil { + return err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/containers/%s/rename", params, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + return response.Process(nil) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/term_unix.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/term_unix.go new file mode 100644 index 00000000000..e14f5081381 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/term_unix.go @@ -0,0 +1,25 @@ +//go:build !windows +// +build !windows + +package containers + +import ( + "context" + "os" + "os/signal" + + sig "github.com/containers/podman/v4/pkg/signal" + "golang.org/x/term" +) + +func makeRawTerm(stdin *os.File) (*term.State, error) { + return term.MakeRaw(int(stdin.Fd())) +} + +func notifyWinChange(ctx context.Context, winChange chan os.Signal, stdin *os.File, stdout *os.File) { + signal.Notify(winChange, sig.SIGWINCH) +} + +func getTermSize(stdin *os.File, stdout *os.File) (width, height int, err error) { + return term.GetSize(int(stdin.Fd())) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/term_windows.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/term_windows.go new file mode 100644 index 00000000000..e710e29986a --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/term_windows.go @@ -0,0 +1,69 @@ +package containers + +import ( + "context" + "os" + "time" + + sig "github.com/containers/podman/v4/pkg/signal" + "golang.org/x/sys/windows" + "golang.org/x/term" +) + +func makeRawTerm(stdin *os.File) (*term.State, error) { + state, err := term.MakeRaw(int(stdin.Fd())) + if err != nil { + return nil, err + } + + // Attempt VT if supported (recent versions of Windows 10+) + var raw uint32 + handle := windows.Handle(stdin.Fd()) + if err := windows.GetConsoleMode(handle, &raw); err != nil { + return nil, err + } + + tryVT := raw | windows.ENABLE_VIRTUAL_TERMINAL_INPUT + + if err := windows.SetConsoleMode(handle, tryVT); err != nil { + if err := windows.SetConsoleMode(handle, raw); err != nil { + return nil, err + } + } + + return state, nil +} + +func notifyWinChange(ctx context.Context, winChange chan 
os.Signal, stdin *os.File, stdout *os.File) { + // Simulate WINCH with polling + go func() { + var lastW int + var lastH int + + d := time.Millisecond * 250 + timer := time.NewTimer(d) + defer timer.Stop() + for ; ; timer.Reset(d) { + select { + case <-ctx.Done(): + return + case <-timer.C: + break + } + + w, h, err := term.GetSize(int(stdout.Fd())) + if err != nil { + continue + } + if w != lastW || h != lastH { + winChange <- sig.SIGWINCH + lastW, lastH = w, h + } + } + }() + +} + +func getTermSize(stdin *os.File, stdout *os.File) (width, height int, err error) { + return term.GetSize(int(stdout.Fd())) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go new file mode 100644 index 00000000000..81d491bb7d9 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go @@ -0,0 +1,290 @@ +package containers + +import ( + "bufio" + "io" + + "github.com/containers/podman/v4/libpod/define" +) + +//go:generate go run ../generator/generator.go LogOptions +// LogOptions describe finer control of log content or +// how the content is formatted. +type LogOptions struct { + Follow *bool + Since *string + Stderr *bool + Stdout *bool + Tail *string + Timestamps *bool + Until *string +} + +//go:generate go run ../generator/generator.go CommitOptions +// CommitOptions describe details about the resulting committed +// image as defined by repo and tag. None of these options +// are required. +type CommitOptions struct { + Author *string + Changes []string + Comment *string + Format *string + Pause *bool + Squash *bool + Repo *string + Tag *string +} + +//go:generate go run ../generator/generator.go AttachOptions +// AttachOptions are optional options for attaching to containers +type AttachOptions struct { + DetachKeys *string // Keys to detach from running container + Logs *bool // Flag to return all logs from container when true + Stream *bool // Flag only return container logs when false and Logs is true +} + +//go:generate go run ../generator/generator.go CheckpointOptions +// CheckpointOptions are optional options for checkpointing containers +type CheckpointOptions struct { + Export *string + CreateImage *string + IgnoreRootfs *bool + Keep *bool + LeaveRunning *bool + TCPEstablished *bool + PrintStats *bool + PreCheckpoint *bool + WithPrevious *bool + FileLocks *bool +} + +//go:generate go run ../generator/generator.go RestoreOptions +// RestoreOptions are optional options for restoring containers +type RestoreOptions struct { + IgnoreRootfs *bool + IgnoreVolumes *bool + IgnoreStaticIP *bool + IgnoreStaticMAC *bool + // ImportAchive is the path to an archive which contains the checkpoint data. + // + // Deprecated: Use ImportArchive instead. This field name is a typo and + // will be removed in a future major release. + ImportAchive *string + // ImportArchive is the path to an archive which contains the checkpoint data. + // ImportArchive is preferred over ImportAchive when both are set. 
+ ImportArchive *string + Keep *bool + Name *string + TCPEstablished *bool + Pod *string + PrintStats *bool + PublishPorts []string + FileLocks *bool +} + +//go:generate go run ../generator/generator.go CreateOptions +// CreateOptions are optional options for creating containers +type CreateOptions struct{} + +//go:generate go run ../generator/generator.go DiffOptions +// DiffOptions are optional options for diffing containers +type DiffOptions struct { + // By default, diff will compare against the parent layer. Change the Parent field if you want to compare against something else. + Parent *string + // Change the type the backend should match. This can be set to "all", "container" or "image". + DiffType *string +} + +//go:generate go run ../generator/generator.go ExecInspectOptions +// ExecInspectOptions are optional options for inspecting +// exec sessions +type ExecInspectOptions struct{} + +//go:generate go run ../generator/generator.go ExecStartOptions +// ExecStartOptions are optional options for starting +// exec sessions +type ExecStartOptions struct { +} + +//go:generate go run ../generator/generator.go HealthCheckOptions +// HealthCheckOptions are optional options for checking +// the health of a container +type HealthCheckOptions struct{} + +//go:generate go run ../generator/generator.go MountOptions +// MountOptions are optional options for mounting +// containers +type MountOptions struct{} + +//go:generate go run ../generator/generator.go UnmountOptions +// UnmountOptions are optional options for unmounting +// containers +type UnmountOptions struct{} + +//go:generate go run ../generator/generator.go MountedContainerPathsOptions +// MountedContainerPathsOptions are optional options for getting +// container mount paths +type MountedContainerPathsOptions struct{} + +//go:generate go run ../generator/generator.go ListOptions +// ListOptions are optional options for listing containers +type ListOptions struct { + All *bool + External *bool + Filters map[string][]string + Last *int + Namespace *bool + Size *bool + Sync *bool +} + +//go:generate go run ../generator/generator.go PruneOptions +// PruneOptions are optional options for pruning containers +type PruneOptions struct { + Filters map[string][]string +} + +//go:generate go run ../generator/generator.go RemoveOptions +// RemoveOptions are optional options for removing containers +type RemoveOptions struct { + Depend *bool + Ignore *bool + Force *bool + Volumes *bool + Timeout *uint +} + +//go:generate go run ../generator/generator.go InspectOptions +// InspectOptions are optional options for inspecting containers +type InspectOptions struct { + Size *bool +} + +//go:generate go run ../generator/generator.go KillOptions +// KillOptions are optional options for killing containers +type KillOptions struct { + Signal *string +} + +//go:generate go run ../generator/generator.go PauseOptions +// PauseOptions are optional options for pausing containers +type PauseOptions struct{} + +//go:generate go run ../generator/generator.go RestartOptions +// RestartOptions are optional options for restarting containers +type RestartOptions struct { + Timeout *int +} + +//go:generate go run ../generator/generator.go StartOptions +// StartOptions are optional options for starting containers +type StartOptions struct { + DetachKeys *string + Recursive *bool +} + +//go:generate go run ../generator/generator.go StatsOptions +// StatsOptions are optional options for getting stats on containers +type StatsOptions struct { + Stream *bool + Interval
*int +} + +//go:generate go run ../generator/generator.go TopOptions +// TopOptions are optional options for getting running +// processes in containers +type TopOptions struct { + Descriptors *[]string +} + +//go:generate go run ../generator/generator.go UnpauseOptions +// UnpauseOptions are optional options for unpausing containers +type UnpauseOptions struct{} + +//go:generate go run ../generator/generator.go WaitOptions +// WaitOptions are optional options for waiting on containers +type WaitOptions struct { + Condition []define.ContainerStatus + Interval *string +} + +//go:generate go run ../generator/generator.go StopOptions +// StopOptions are optional options for stopping containers +type StopOptions struct { + Ignore *bool + Timeout *uint +} + +//go:generate go run ../generator/generator.go ExportOptions +// ExportOptions are optional options for exporting containers +type ExportOptions struct{} + +//go:generate go run ../generator/generator.go InitOptions +// InitOptions are optional options for initializing containers +type InitOptions struct{} + +//go:generate go run ../generator/generator.go ShouldRestartOptions +// ShouldRestartOptions are optional options for checking +// whether a container should be restarted +type ShouldRestartOptions struct{} + +//go:generate go run ../generator/generator.go RenameOptions +// RenameOptions are options for renaming containers. +// The Name field is required. +type RenameOptions struct { + Name *string +} + +//go:generate go run ../generator/generator.go ResizeTTYOptions +// ResizeTTYOptions are optional options for resizing +// container TTYs +type ResizeTTYOptions struct { + Height *int + Width *int + Running *bool +} + +//go:generate go run ../generator/generator.go ResizeExecTTYOptions +// ResizeExecTTYOptions are optional options for resizing +// container ExecTTYs +type ResizeExecTTYOptions struct { + Height *int + Width *int +} + +//go:generate go run ../generator/generator.go ExecStartAndAttachOptions +// ExecStartAndAttachOptions are optional options for starting +// and attaching to exec sessions +type ExecStartAndAttachOptions struct { + // OutputStream will be attached to container's STDOUT + OutputStream *io.WriteCloser + // ErrorStream will be attached to container's STDERR + ErrorStream *io.WriteCloser + // InputStream will be attached to container's STDIN + InputStream *bufio.Reader + // AttachOutput is whether to attach to STDOUT + // If false, stdout will not be attached + AttachOutput *bool + // AttachError is whether to attach to STDERR + // If false, stderr will not be attached + AttachError *bool + // AttachInput is whether to attach to STDIN + // If false, stdin will not be attached + AttachInput *bool +} + +//go:generate go run ../generator/generator.go ExistsOptions +// ExistsOptions are optional options for checking if a container exists +type ExistsOptions struct { + // External checks for containers created outside of Podman + External *bool +} + +//go:generate go run ../generator/generator.go CopyOptions +// CopyOptions are options for copying to containers. +type CopyOptions struct { + // If used with CopyFromArchive and set to true it will change ownership of files from the source tar archive + // to the primary uid/gid of the target container. + Chown *bool `schema:"copyUIDGID"` + // Map to translate path names.
+ Rename map[string]string +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_attach_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_attach_options.go new file mode 100644 index 00000000000..9fa21ef4e14 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_attach_options.go @@ -0,0 +1,63 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *AttachOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *AttachOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithDetachKeys set keys to detach from running container +func (o *AttachOptions) WithDetachKeys(value string) *AttachOptions { + o.DetachKeys = &value + return o +} + +// GetDetachKeys returns value of keys to detach from running container +func (o *AttachOptions) GetDetachKeys() string { + if o.DetachKeys == nil { + var z string + return z + } + return *o.DetachKeys +} + +// WithLogs set flag to return all logs from container when true +func (o *AttachOptions) WithLogs(value bool) *AttachOptions { + o.Logs = &value + return o +} + +// GetLogs returns value of flag to return all logs from container when true +func (o *AttachOptions) GetLogs() bool { + if o.Logs == nil { + var z bool + return z + } + return *o.Logs +} + +// WithStream set flag only return container logs when false and Logs is true +func (o *AttachOptions) WithStream(value bool) *AttachOptions { + o.Stream = &value + return o +} + +// GetStream returns value of flag only return container logs when false and Logs is true +func (o *AttachOptions) GetStream() bool { + if o.Stream == nil { + var z bool + return z + } + return *o.Stream +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_checkpoint_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_checkpoint_options.go new file mode 100644 index 00000000000..d5f6e541d1e --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_checkpoint_options.go @@ -0,0 +1,168 @@ +// Code generated by go generate; DO NOT EDIT. 
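Every generated types_*_options.go file that follows repeats the shape shown above: Changed and ToParams plus pointer-backed With*/Get* accessors, so "unset" stays distinct from a zero value. A brief sketch of the pattern (editor's illustration, not vendored code), using ListOptions from types.go:

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings/containers"
)

func main() {
	// Fluent setters: each With* stores a pointer to the value.
	opts := new(containers.ListOptions).
		WithAll(true).
		WithFilters(map[string][]string{"status": {"running"}})

	fmt.Println(opts.Changed("All")) // true: the field was explicitly set
	fmt.Println(opts.GetLast())      // 0: getters return the zero value for unset fields

	// ToParams reflects the set fields into url.Values for the HTTP request.
	params, err := opts.ToParams()
	if err != nil {
		panic(err)
	}
	fmt.Println(params.Encode())
}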
+package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *CheckpointOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *CheckpointOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithExport set field Export to given value +func (o *CheckpointOptions) WithExport(value string) *CheckpointOptions { + o.Export = &value + return o +} + +// GetExport returns value of field Export +func (o *CheckpointOptions) GetExport() string { + if o.Export == nil { + var z string + return z + } + return *o.Export +} + +// WithCreateImage set field CreateImage to given value +func (o *CheckpointOptions) WithCreateImage(value string) *CheckpointOptions { + o.CreateImage = &value + return o +} + +// GetCreateImage returns value of field CreateImage +func (o *CheckpointOptions) GetCreateImage() string { + if o.CreateImage == nil { + var z string + return z + } + return *o.CreateImage +} + +// WithIgnoreRootfs set field IgnoreRootfs to given value +func (o *CheckpointOptions) WithIgnoreRootfs(value bool) *CheckpointOptions { + o.IgnoreRootfs = &value + return o +} + +// GetIgnoreRootfs returns value of field IgnoreRootfs +func (o *CheckpointOptions) GetIgnoreRootfs() bool { + if o.IgnoreRootfs == nil { + var z bool + return z + } + return *o.IgnoreRootfs +} + +// WithKeep set field Keep to given value +func (o *CheckpointOptions) WithKeep(value bool) *CheckpointOptions { + o.Keep = &value + return o +} + +// GetKeep returns value of field Keep +func (o *CheckpointOptions) GetKeep() bool { + if o.Keep == nil { + var z bool + return z + } + return *o.Keep +} + +// WithLeaveRunning set field LeaveRunning to given value +func (o *CheckpointOptions) WithLeaveRunning(value bool) *CheckpointOptions { + o.LeaveRunning = &value + return o +} + +// GetLeaveRunning returns value of field LeaveRunning +func (o *CheckpointOptions) GetLeaveRunning() bool { + if o.LeaveRunning == nil { + var z bool + return z + } + return *o.LeaveRunning +} + +// WithTCPEstablished set field TCPEstablished to given value +func (o *CheckpointOptions) WithTCPEstablished(value bool) *CheckpointOptions { + o.TCPEstablished = &value + return o +} + +// GetTCPEstablished returns value of field TCPEstablished +func (o *CheckpointOptions) GetTCPEstablished() bool { + if o.TCPEstablished == nil { + var z bool + return z + } + return *o.TCPEstablished +} + +// WithPrintStats set field PrintStats to given value +func (o *CheckpointOptions) WithPrintStats(value bool) *CheckpointOptions { + o.PrintStats = &value + return o +} + +// GetPrintStats returns value of field PrintStats +func (o *CheckpointOptions) GetPrintStats() bool { + if o.PrintStats == nil { + var z bool + return z + } + return *o.PrintStats +} + +// WithPreCheckpoint set field PreCheckpoint to given value +func (o *CheckpointOptions) WithPreCheckpoint(value bool) *CheckpointOptions { + o.PreCheckpoint = &value + return o +} + +// GetPreCheckpoint returns value of field PreCheckpoint +func (o *CheckpointOptions) GetPreCheckpoint() bool { + if o.PreCheckpoint == nil { + var z bool + return z + } + return *o.PreCheckpoint +} + +// WithWithPrevious set field WithPrevious to given value +func (o *CheckpointOptions) WithWithPrevious(value bool) *CheckpointOptions { + o.WithPrevious = &value + return o +} + +// GetWithPrevious returns value of field 
WithPrevious +func (o *CheckpointOptions) GetWithPrevious() bool { + if o.WithPrevious == nil { + var z bool + return z + } + return *o.WithPrevious +} + +// WithFileLocks set field FileLocks to given value +func (o *CheckpointOptions) WithFileLocks(value bool) *CheckpointOptions { + o.FileLocks = &value + return o +} + +// GetFileLocks returns value of field FileLocks +func (o *CheckpointOptions) GetFileLocks() bool { + if o.FileLocks == nil { + var z bool + return z + } + return *o.FileLocks +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_commit_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_commit_options.go new file mode 100644 index 00000000000..7b4745eb885 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_commit_options.go @@ -0,0 +1,138 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *CommitOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *CommitOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithAuthor set field Author to given value +func (o *CommitOptions) WithAuthor(value string) *CommitOptions { + o.Author = &value + return o +} + +// GetAuthor returns value of field Author +func (o *CommitOptions) GetAuthor() string { + if o.Author == nil { + var z string + return z + } + return *o.Author +} + +// WithChanges set field Changes to given value +func (o *CommitOptions) WithChanges(value []string) *CommitOptions { + o.Changes = value + return o +} + +// GetChanges returns value of field Changes +func (o *CommitOptions) GetChanges() []string { + if o.Changes == nil { + var z []string + return z + } + return o.Changes +} + +// WithComment set field Comment to given value +func (o *CommitOptions) WithComment(value string) *CommitOptions { + o.Comment = &value + return o +} + +// GetComment returns value of field Comment +func (o *CommitOptions) GetComment() string { + if o.Comment == nil { + var z string + return z + } + return *o.Comment +} + +// WithFormat set field Format to given value +func (o *CommitOptions) WithFormat(value string) *CommitOptions { + o.Format = &value + return o +} + +// GetFormat returns value of field Format +func (o *CommitOptions) GetFormat() string { + if o.Format == nil { + var z string + return z + } + return *o.Format +} + +// WithPause set field Pause to given value +func (o *CommitOptions) WithPause(value bool) *CommitOptions { + o.Pause = &value + return o +} + +// GetPause returns value of field Pause +func (o *CommitOptions) GetPause() bool { + if o.Pause == nil { + var z bool + return z + } + return *o.Pause +} + +// WithSquash set field Squash to given value +func (o *CommitOptions) WithSquash(value bool) *CommitOptions { + o.Squash = &value + return o +} + +// GetSquash returns value of field Squash +func (o *CommitOptions) GetSquash() bool { + if o.Squash == nil { + var z bool + return z + } + return *o.Squash +} + +// WithRepo set field Repo to given value +func (o *CommitOptions) WithRepo(value string) *CommitOptions { + o.Repo = &value + return o +} + +// GetRepo returns value of field Repo +func (o *CommitOptions) GetRepo() string { + if o.Repo == nil { + var z string + return z + } + return *o.Repo +} + +// WithTag set 
field Tag to given value +func (o *CommitOptions) WithTag(value string) *CommitOptions { + o.Tag = &value + return o +} + +// GetTag returns value of field Tag +func (o *CommitOptions) GetTag() string { + if o.Tag == nil { + var z string + return z + } + return *o.Tag +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_copy_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_copy_options.go new file mode 100644 index 00000000000..8fcfe71a60c --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_copy_options.go @@ -0,0 +1,48 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *CopyOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *CopyOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithChown set field Chown to given value +func (o *CopyOptions) WithChown(value bool) *CopyOptions { + o.Chown = &value + return o +} + +// GetChown returns value of field Chown +func (o *CopyOptions) GetChown() bool { + if o.Chown == nil { + var z bool + return z + } + return *o.Chown +} + +// WithRename set field Rename to given value +func (o *CopyOptions) WithRename(value map[string]string) *CopyOptions { + o.Rename = value + return o +} + +// GetRename returns value of field Rename +func (o *CopyOptions) GetRename() map[string]string { + if o.Rename == nil { + var z map[string]string + return z + } + return o.Rename +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_create_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_create_options.go new file mode 100644 index 00000000000..57896b9ef99 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_create_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *CreateOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *CreateOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_diff_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_diff_options.go new file mode 100644 index 00000000000..5fc3dedae2a --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_diff_options.go @@ -0,0 +1,48 @@ +// Code generated by go generate; DO NOT EDIT. 
+package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *DiffOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *DiffOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithParent set field Parent to given value +func (o *DiffOptions) WithParent(value string) *DiffOptions { + o.Parent = &value + return o +} + +// GetParent returns value of field Parent +func (o *DiffOptions) GetParent() string { + if o.Parent == nil { + var z string + return z + } + return *o.Parent +} + +// WithDiffType set field DiffType to given value +func (o *DiffOptions) WithDiffType(value string) *DiffOptions { + o.DiffType = &value + return o +} + +// GetDiffType returns value of field DiffType +func (o *DiffOptions) GetDiffType() string { + if o.DiffType == nil { + var z string + return z + } + return *o.DiffType +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execinspect_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execinspect_options.go new file mode 100644 index 00000000000..0e926be0c1c --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execinspect_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ExecInspectOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ExecInspectOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execstart_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execstart_options.go new file mode 100644 index 00000000000..4bd66e25bf0 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execstart_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ExecStartOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ExecStartOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execstartandattach_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execstartandattach_options.go new file mode 100644 index 00000000000..df7ac45d1c6 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execstartandattach_options.go @@ -0,0 +1,110 @@ +// Code generated by go generate; DO NOT EDIT. 
+package containers + +import ( + "bufio" + "io" + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ExecStartAndAttachOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ExecStartAndAttachOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithOutputStream set field OutputStream to given value +func (o *ExecStartAndAttachOptions) WithOutputStream(value io.WriteCloser) *ExecStartAndAttachOptions { + o.OutputStream = &value + return o +} + +// GetOutputStream returns value of field OutputStream +func (o *ExecStartAndAttachOptions) GetOutputStream() io.WriteCloser { + if o.OutputStream == nil { + var z io.WriteCloser + return z + } + return *o.OutputStream +} + +// WithErrorStream set field ErrorStream to given value +func (o *ExecStartAndAttachOptions) WithErrorStream(value io.WriteCloser) *ExecStartAndAttachOptions { + o.ErrorStream = &value + return o +} + +// GetErrorStream returns value of field ErrorStream +func (o *ExecStartAndAttachOptions) GetErrorStream() io.WriteCloser { + if o.ErrorStream == nil { + var z io.WriteCloser + return z + } + return *o.ErrorStream +} + +// WithInputStream set field InputStream to given value +func (o *ExecStartAndAttachOptions) WithInputStream(value bufio.Reader) *ExecStartAndAttachOptions { + o.InputStream = &value + return o +} + +// GetInputStream returns value of field InputStream +func (o *ExecStartAndAttachOptions) GetInputStream() bufio.Reader { + if o.InputStream == nil { + var z bufio.Reader + return z + } + return *o.InputStream +} + +// WithAttachOutput set field AttachOutput to given value +func (o *ExecStartAndAttachOptions) WithAttachOutput(value bool) *ExecStartAndAttachOptions { + o.AttachOutput = &value + return o +} + +// GetAttachOutput returns value of field AttachOutput +func (o *ExecStartAndAttachOptions) GetAttachOutput() bool { + if o.AttachOutput == nil { + var z bool + return z + } + return *o.AttachOutput +} + +// WithAttachError set field AttachError to given value +func (o *ExecStartAndAttachOptions) WithAttachError(value bool) *ExecStartAndAttachOptions { + o.AttachError = &value + return o +} + +// GetAttachError returns value of field AttachError +func (o *ExecStartAndAttachOptions) GetAttachError() bool { + if o.AttachError == nil { + var z bool + return z + } + return *o.AttachError +} + +// WithAttachInput set field AttachInput to given value +func (o *ExecStartAndAttachOptions) WithAttachInput(value bool) *ExecStartAndAttachOptions { + o.AttachInput = &value + return o +} + +// GetAttachInput returns value of field AttachInput +func (o *ExecStartAndAttachOptions) GetAttachInput() bool { + if o.AttachInput == nil { + var z bool + return z + } + return *o.AttachInput +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_exists_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_exists_options.go new file mode 100644 index 00000000000..6c73fcc654a --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_exists_options.go @@ -0,0 +1,33 @@ +// Code generated by go generate; DO NOT EDIT. 
+package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ExistsOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ExistsOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithExternal set field External to given value +func (o *ExistsOptions) WithExternal(value bool) *ExistsOptions { + o.External = &value + return o +} + +// GetExternal returns value of field External +func (o *ExistsOptions) GetExternal() bool { + if o.External == nil { + var z bool + return z + } + return *o.External +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_export_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_export_options.go new file mode 100644 index 00000000000..041a18041f2 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_export_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ExportOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ExportOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_healthcheck_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_healthcheck_options.go new file mode 100644 index 00000000000..f7daafdabae --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_healthcheck_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *HealthCheckOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *HealthCheckOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_init_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_init_options.go new file mode 100644 index 00000000000..f93422ee3ea --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_init_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. 
+package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *InitOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *InitOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_inspect_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_inspect_options.go new file mode 100644 index 00000000000..0fa8d0917b6 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_inspect_options.go @@ -0,0 +1,33 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *InspectOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *InspectOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithSize set field Size to given value +func (o *InspectOptions) WithSize(value bool) *InspectOptions { + o.Size = &value + return o +} + +// GetSize returns value of field Size +func (o *InspectOptions) GetSize() bool { + if o.Size == nil { + var z bool + return z + } + return *o.Size +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_kill_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_kill_options.go new file mode 100644 index 00000000000..af26b341658 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_kill_options.go @@ -0,0 +1,33 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *KillOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *KillOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithSignal set field Signal to given value +func (o *KillOptions) WithSignal(value string) *KillOptions { + o.Signal = &value + return o +} + +// GetSignal returns value of field Signal +func (o *KillOptions) GetSignal() string { + if o.Signal == nil { + var z string + return z + } + return *o.Signal +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_list_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_list_options.go new file mode 100644 index 00000000000..0204423eb24 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_list_options.go @@ -0,0 +1,123 @@ +// Code generated by go generate; DO NOT EDIT. 
+package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ListOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ListOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithAll set field All to given value +func (o *ListOptions) WithAll(value bool) *ListOptions { + o.All = &value + return o +} + +// GetAll returns value of field All +func (o *ListOptions) GetAll() bool { + if o.All == nil { + var z bool + return z + } + return *o.All +} + +// WithExternal set field External to given value +func (o *ListOptions) WithExternal(value bool) *ListOptions { + o.External = &value + return o +} + +// GetExternal returns value of field External +func (o *ListOptions) GetExternal() bool { + if o.External == nil { + var z bool + return z + } + return *o.External +} + +// WithFilters set field Filters to given value +func (o *ListOptions) WithFilters(value map[string][]string) *ListOptions { + o.Filters = value + return o +} + +// GetFilters returns value of field Filters +func (o *ListOptions) GetFilters() map[string][]string { + if o.Filters == nil { + var z map[string][]string + return z + } + return o.Filters +} + +// WithLast set field Last to given value +func (o *ListOptions) WithLast(value int) *ListOptions { + o.Last = &value + return o +} + +// GetLast returns value of field Last +func (o *ListOptions) GetLast() int { + if o.Last == nil { + var z int + return z + } + return *o.Last +} + +// WithNamespace set field Namespace to given value +func (o *ListOptions) WithNamespace(value bool) *ListOptions { + o.Namespace = &value + return o +} + +// GetNamespace returns value of field Namespace +func (o *ListOptions) GetNamespace() bool { + if o.Namespace == nil { + var z bool + return z + } + return *o.Namespace +} + +// WithSize set field Size to given value +func (o *ListOptions) WithSize(value bool) *ListOptions { + o.Size = &value + return o +} + +// GetSize returns value of field Size +func (o *ListOptions) GetSize() bool { + if o.Size == nil { + var z bool + return z + } + return *o.Size +} + +// WithSync set field Sync to given value +func (o *ListOptions) WithSync(value bool) *ListOptions { + o.Sync = &value + return o +} + +// GetSync returns value of field Sync +func (o *ListOptions) GetSync() bool { + if o.Sync == nil { + var z bool + return z + } + return *o.Sync +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_log_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_log_options.go new file mode 100644 index 00000000000..4aab596d8c2 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_log_options.go @@ -0,0 +1,123 @@ +// Code generated by go generate; DO NOT EDIT. 
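The LogOptions helpers in this next file pair with the Logs binding from logs.go above, which blocks while demultiplexing frames onto the two string channels. A hedged sketch (editor's illustration, not vendored code), with a placeholder container name:

package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings"
	"github.com/containers/podman/v4/pkg/bindings/containers"
)

func main() {
	conn, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}

	stdout := make(chan string)
	stderr := make(chan string)
	done := make(chan error)

	opts := new(containers.LogOptions).
		WithStdout(true).
		WithStderr(true).
		WithTail("100")

	// Logs blocks until the stream ends, so run it in a goroutine.
	go func() {
		done <- containers.Logs(conn, "demo", opts, stdout, stderr)
	}()

	for {
		select {
		case line := <-stdout:
			fmt.Print("out: ", line)
		case line := <-stderr:
			fmt.Print("err: ", line)
		case err := <-done:
			if err != nil {
				panic(err)
			}
			return
		}
	}
}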
+package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *LogOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *LogOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithFollow set field Follow to given value +func (o *LogOptions) WithFollow(value bool) *LogOptions { + o.Follow = &value + return o +} + +// GetFollow returns value of field Follow +func (o *LogOptions) GetFollow() bool { + if o.Follow == nil { + var z bool + return z + } + return *o.Follow +} + +// WithSince set field Since to given value +func (o *LogOptions) WithSince(value string) *LogOptions { + o.Since = &value + return o +} + +// GetSince returns value of field Since +func (o *LogOptions) GetSince() string { + if o.Since == nil { + var z string + return z + } + return *o.Since +} + +// WithStderr set field Stderr to given value +func (o *LogOptions) WithStderr(value bool) *LogOptions { + o.Stderr = &value + return o +} + +// GetStderr returns value of field Stderr +func (o *LogOptions) GetStderr() bool { + if o.Stderr == nil { + var z bool + return z + } + return *o.Stderr +} + +// WithStdout set field Stdout to given value +func (o *LogOptions) WithStdout(value bool) *LogOptions { + o.Stdout = &value + return o +} + +// GetStdout returns value of field Stdout +func (o *LogOptions) GetStdout() bool { + if o.Stdout == nil { + var z bool + return z + } + return *o.Stdout +} + +// WithTail set field Tail to given value +func (o *LogOptions) WithTail(value string) *LogOptions { + o.Tail = &value + return o +} + +// GetTail returns value of field Tail +func (o *LogOptions) GetTail() string { + if o.Tail == nil { + var z string + return z + } + return *o.Tail +} + +// WithTimestamps set field Timestamps to given value +func (o *LogOptions) WithTimestamps(value bool) *LogOptions { + o.Timestamps = &value + return o +} + +// GetTimestamps returns value of field Timestamps +func (o *LogOptions) GetTimestamps() bool { + if o.Timestamps == nil { + var z bool + return z + } + return *o.Timestamps +} + +// WithUntil set field Until to given value +func (o *LogOptions) WithUntil(value string) *LogOptions { + o.Until = &value + return o +} + +// GetUntil returns value of field Until +func (o *LogOptions) GetUntil() string { + if o.Until == nil { + var z string + return z + } + return *o.Until +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_mount_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_mount_options.go new file mode 100644 index 00000000000..1e0b7ddbfb8 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_mount_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. 
+package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *MountOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *MountOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_mountedcontainerpaths_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_mountedcontainerpaths_options.go new file mode 100644 index 00000000000..62377b52b91 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_mountedcontainerpaths_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *MountedContainerPathsOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *MountedContainerPathsOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_pause_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_pause_options.go new file mode 100644 index 00000000000..26ee31db07c --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_pause_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *PauseOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *PauseOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_prune_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_prune_options.go new file mode 100644 index 00000000000..413b84f4739 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_prune_options.go @@ -0,0 +1,33 @@ +// Code generated by go generate; DO NOT EDIT. 
+package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *PruneOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *PruneOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithFilters set field Filters to given value +func (o *PruneOptions) WithFilters(value map[string][]string) *PruneOptions { + o.Filters = value + return o +} + +// GetFilters returns value of field Filters +func (o *PruneOptions) GetFilters() map[string][]string { + if o.Filters == nil { + var z map[string][]string + return z + } + return o.Filters +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_remove_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_remove_options.go new file mode 100644 index 00000000000..b73b01cd2d1 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_remove_options.go @@ -0,0 +1,93 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *RemoveOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *RemoveOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithDepend set field Depend to given value +func (o *RemoveOptions) WithDepend(value bool) *RemoveOptions { + o.Depend = &value + return o +} + +// GetDepend returns value of field Depend +func (o *RemoveOptions) GetDepend() bool { + if o.Depend == nil { + var z bool + return z + } + return *o.Depend +} + +// WithIgnore set field Ignore to given value +func (o *RemoveOptions) WithIgnore(value bool) *RemoveOptions { + o.Ignore = &value + return o +} + +// GetIgnore returns value of field Ignore +func (o *RemoveOptions) GetIgnore() bool { + if o.Ignore == nil { + var z bool + return z + } + return *o.Ignore +} + +// WithForce set field Force to given value +func (o *RemoveOptions) WithForce(value bool) *RemoveOptions { + o.Force = &value + return o +} + +// GetForce returns value of field Force +func (o *RemoveOptions) GetForce() bool { + if o.Force == nil { + var z bool + return z + } + return *o.Force +} + +// WithVolumes set field Volumes to given value +func (o *RemoveOptions) WithVolumes(value bool) *RemoveOptions { + o.Volumes = &value + return o +} + +// GetVolumes returns value of field Volumes +func (o *RemoveOptions) GetVolumes() bool { + if o.Volumes == nil { + var z bool + return z + } + return *o.Volumes +} + +// WithTimeout set field Timeout to given value +func (o *RemoveOptions) WithTimeout(value uint) *RemoveOptions { + o.Timeout = &value + return o +} + +// GetTimeout returns value of field Timeout +func (o *RemoveOptions) GetTimeout() uint { + if o.Timeout == nil { + var z uint + return z + } + return *o.Timeout +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_rename_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_rename_options.go new file mode 100644 index 00000000000..1957a39823e --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_rename_options.go @@ -0,0 +1,33 @@ +// Code generated by 
go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *RenameOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *RenameOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithName set field Name to given value +func (o *RenameOptions) WithName(value string) *RenameOptions { + o.Name = &value + return o +} + +// GetName returns value of field Name +func (o *RenameOptions) GetName() string { + if o.Name == nil { + var z string + return z + } + return *o.Name +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_resizeexectty_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_resizeexectty_options.go new file mode 100644 index 00000000000..2a7d5b54049 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_resizeexectty_options.go @@ -0,0 +1,48 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ResizeExecTTYOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ResizeExecTTYOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithHeight set field Height to given value +func (o *ResizeExecTTYOptions) WithHeight(value int) *ResizeExecTTYOptions { + o.Height = &value + return o +} + +// GetHeight returns value of field Height +func (o *ResizeExecTTYOptions) GetHeight() int { + if o.Height == nil { + var z int + return z + } + return *o.Height +} + +// WithWidth set field Width to given value +func (o *ResizeExecTTYOptions) WithWidth(value int) *ResizeExecTTYOptions { + o.Width = &value + return o +} + +// GetWidth returns value of field Width +func (o *ResizeExecTTYOptions) GetWidth() int { + if o.Width == nil { + var z int + return z + } + return *o.Width +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_resizetty_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_resizetty_options.go new file mode 100644 index 00000000000..fc027c48d92 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_resizetty_options.go @@ -0,0 +1,63 @@ +// Code generated by go generate; DO NOT EDIT. 
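All of these generated types_*_options.go files share one fluent pattern: each WithX setter stores a pointer to the value and returns the receiver so calls chain, each GetX getter dereferences safely (falling back to the zero value when the field was never set), and ToParams reflects the populated fields into url.Values for the query string of the eventual API request. A minimal caller-side sketch using the RemoveOptions type above (not part of the vendored diff; the encoded key names come from struct tags that this hunk does not show):

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings/containers"
)

func main() {
	// Chain the generated setters; each stores a pointer and returns *RemoveOptions.
	opts := new(containers.RemoveOptions).WithForce(true).WithVolumes(true)

	// Unset fields read back as their zero value rather than panicking.
	fmt.Println(opts.GetIgnore()) // false

	// ToParams serializes the set fields into query parameters.
	params, err := opts.ToParams()
	if err != nil {
		panic(err)
	}
	fmt.Println(params.Encode())
}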
+package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ResizeTTYOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ResizeTTYOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithHeight set field Height to given value +func (o *ResizeTTYOptions) WithHeight(value int) *ResizeTTYOptions { + o.Height = &value + return o +} + +// GetHeight returns value of field Height +func (o *ResizeTTYOptions) GetHeight() int { + if o.Height == nil { + var z int + return z + } + return *o.Height +} + +// WithWidth set field Width to given value +func (o *ResizeTTYOptions) WithWidth(value int) *ResizeTTYOptions { + o.Width = &value + return o +} + +// GetWidth returns value of field Width +func (o *ResizeTTYOptions) GetWidth() int { + if o.Width == nil { + var z int + return z + } + return *o.Width +} + +// WithRunning set field Running to given value +func (o *ResizeTTYOptions) WithRunning(value bool) *ResizeTTYOptions { + o.Running = &value + return o +} + +// GetRunning returns value of field Running +func (o *ResizeTTYOptions) GetRunning() bool { + if o.Running == nil { + var z bool + return z + } + return *o.Running +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_restart_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_restart_options.go new file mode 100644 index 00000000000..f5f20df51de --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_restart_options.go @@ -0,0 +1,33 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *RestartOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *RestartOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithTimeout set field Timeout to given value +func (o *RestartOptions) WithTimeout(value int) *RestartOptions { + o.Timeout = &value + return o +} + +// GetTimeout returns value of field Timeout +func (o *RestartOptions) GetTimeout() int { + if o.Timeout == nil { + var z int + return z + } + return *o.Timeout +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_restore_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_restore_options.go new file mode 100644 index 00000000000..b1b14a7042a --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_restore_options.go @@ -0,0 +1,213 @@ +// Code generated by go generate; DO NOT EDIT. 
+package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *RestoreOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *RestoreOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithIgnoreRootfs set field IgnoreRootfs to given value +func (o *RestoreOptions) WithIgnoreRootfs(value bool) *RestoreOptions { + o.IgnoreRootfs = &value + return o +} + +// GetIgnoreRootfs returns value of field IgnoreRootfs +func (o *RestoreOptions) GetIgnoreRootfs() bool { + if o.IgnoreRootfs == nil { + var z bool + return z + } + return *o.IgnoreRootfs +} + +// WithIgnoreVolumes set field IgnoreVolumes to given value +func (o *RestoreOptions) WithIgnoreVolumes(value bool) *RestoreOptions { + o.IgnoreVolumes = &value + return o +} + +// GetIgnoreVolumes returns value of field IgnoreVolumes +func (o *RestoreOptions) GetIgnoreVolumes() bool { + if o.IgnoreVolumes == nil { + var z bool + return z + } + return *o.IgnoreVolumes +} + +// WithIgnoreStaticIP set field IgnoreStaticIP to given value +func (o *RestoreOptions) WithIgnoreStaticIP(value bool) *RestoreOptions { + o.IgnoreStaticIP = &value + return o +} + +// GetIgnoreStaticIP returns value of field IgnoreStaticIP +func (o *RestoreOptions) GetIgnoreStaticIP() bool { + if o.IgnoreStaticIP == nil { + var z bool + return z + } + return *o.IgnoreStaticIP +} + +// WithIgnoreStaticMAC set field IgnoreStaticMAC to given value +func (o *RestoreOptions) WithIgnoreStaticMAC(value bool) *RestoreOptions { + o.IgnoreStaticMAC = &value + return o +} + +// GetIgnoreStaticMAC returns value of field IgnoreStaticMAC +func (o *RestoreOptions) GetIgnoreStaticMAC() bool { + if o.IgnoreStaticMAC == nil { + var z bool + return z + } + return *o.IgnoreStaticMAC +} + +// WithImportAchive set field ImportAchive to given value +func (o *RestoreOptions) WithImportAchive(value string) *RestoreOptions { + o.ImportAchive = &value + return o +} + +// GetImportAchive returns value of field ImportAchive +func (o *RestoreOptions) GetImportAchive() string { + if o.ImportAchive == nil { + var z string + return z + } + return *o.ImportAchive +} + +// WithImportArchive set field ImportArchive to given value +func (o *RestoreOptions) WithImportArchive(value string) *RestoreOptions { + o.ImportArchive = &value + return o +} + +// GetImportArchive returns value of field ImportArchive +func (o *RestoreOptions) GetImportArchive() string { + if o.ImportArchive == nil { + var z string + return z + } + return *o.ImportArchive +} + +// WithKeep set field Keep to given value +func (o *RestoreOptions) WithKeep(value bool) *RestoreOptions { + o.Keep = &value + return o +} + +// GetKeep returns value of field Keep +func (o *RestoreOptions) GetKeep() bool { + if o.Keep == nil { + var z bool + return z + } + return *o.Keep +} + +// WithName set field Name to given value +func (o *RestoreOptions) WithName(value string) *RestoreOptions { + o.Name = &value + return o +} + +// GetName returns value of field Name +func (o *RestoreOptions) GetName() string { + if o.Name == nil { + var z string + return z + } + return *o.Name +} + +// WithTCPEstablished set field TCPEstablished to given value +func (o *RestoreOptions) WithTCPEstablished(value bool) *RestoreOptions { + o.TCPEstablished = &value + return o +} + +// GetTCPEstablished returns value of field TCPEstablished +func (o 
*RestoreOptions) GetTCPEstablished() bool { + if o.TCPEstablished == nil { + var z bool + return z + } + return *o.TCPEstablished +} + +// WithPod set field Pod to given value +func (o *RestoreOptions) WithPod(value string) *RestoreOptions { + o.Pod = &value + return o +} + +// GetPod returns value of field Pod +func (o *RestoreOptions) GetPod() string { + if o.Pod == nil { + var z string + return z + } + return *o.Pod +} + +// WithPrintStats set field PrintStats to given value +func (o *RestoreOptions) WithPrintStats(value bool) *RestoreOptions { + o.PrintStats = &value + return o +} + +// GetPrintStats returns value of field PrintStats +func (o *RestoreOptions) GetPrintStats() bool { + if o.PrintStats == nil { + var z bool + return z + } + return *o.PrintStats +} + +// WithPublishPorts set field PublishPorts to given value +func (o *RestoreOptions) WithPublishPorts(value []string) *RestoreOptions { + o.PublishPorts = value + return o +} + +// GetPublishPorts returns value of field PublishPorts +func (o *RestoreOptions) GetPublishPorts() []string { + if o.PublishPorts == nil { + var z []string + return z + } + return o.PublishPorts +} + +// WithFileLocks set field FileLocks to given value +func (o *RestoreOptions) WithFileLocks(value bool) *RestoreOptions { + o.FileLocks = &value + return o +} + +// GetFileLocks returns value of field FileLocks +func (o *RestoreOptions) GetFileLocks() bool { + if o.FileLocks == nil { + var z bool + return z + } + return *o.FileLocks +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_shouldrestart_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_shouldrestart_options.go new file mode 100644 index 00000000000..e9d529355eb --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_shouldrestart_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ShouldRestartOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ShouldRestartOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_start_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_start_options.go new file mode 100644 index 00000000000..88342dcd06f --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_start_options.go @@ -0,0 +1,48 @@ +// Code generated by go generate; DO NOT EDIT. 
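RestoreOptions is the one type in this batch worth a second look: it carries both the misspelled ImportAchive field and its corrected ImportArchive counterpart, apparently kept side by side so existing callers of the typo'd accessor keep compiling. A sketch of configuring a restore from an exported checkpoint (archive path and container name are invented for illustration; prefer the corrected accessor):

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings/containers"
)

func main() {
	// Illustrative values only, not vendored code.
	opts := new(containers.RestoreOptions).
		WithImportArchive("/tmp/ckpt.tar.gz").
		WithName("restored-ctr").
		WithKeep(true).          // keep checkpoint artifacts after the restore
		WithTCPEstablished(true) // also restore established TCP connections

	params, err := opts.ToParams()
	if err != nil {
		panic(err)
	}
	fmt.Println(params.Encode())
}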
+package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *StartOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *StartOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithDetachKeys set field DetachKeys to given value +func (o *StartOptions) WithDetachKeys(value string) *StartOptions { + o.DetachKeys = &value + return o +} + +// GetDetachKeys returns value of field DetachKeys +func (o *StartOptions) GetDetachKeys() string { + if o.DetachKeys == nil { + var z string + return z + } + return *o.DetachKeys +} + +// WithRecursive set field Recursive to given value +func (o *StartOptions) WithRecursive(value bool) *StartOptions { + o.Recursive = &value + return o +} + +// GetRecursive returns value of field Recursive +func (o *StartOptions) GetRecursive() bool { + if o.Recursive == nil { + var z bool + return z + } + return *o.Recursive +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stats_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stats_options.go new file mode 100644 index 00000000000..51b3fb41da3 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stats_options.go @@ -0,0 +1,48 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *StatsOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *StatsOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithStream set field Stream to given value +func (o *StatsOptions) WithStream(value bool) *StatsOptions { + o.Stream = &value + return o +} + +// GetStream returns value of field Stream +func (o *StatsOptions) GetStream() bool { + if o.Stream == nil { + var z bool + return z + } + return *o.Stream +} + +// WithInterval set field Interval to given value +func (o *StatsOptions) WithInterval(value int) *StatsOptions { + o.Interval = &value + return o +} + +// GetInterval returns value of field Interval +func (o *StatsOptions) GetInterval() int { + if o.Interval == nil { + var z int + return z + } + return *o.Interval +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stop_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stop_options.go new file mode 100644 index 00000000000..375557ecb21 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stop_options.go @@ -0,0 +1,48 @@ +// Code generated by go generate; DO NOT EDIT. 
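Because every scalar field in these options structs is a pointer, the bindings can tell "never set" apart from "explicitly set to the zero value": Changed reports whether the pointer was populated, while GetX deliberately hides the difference. StatsOptions above makes a compact demonstration (sketch only, not vendored code):

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings/containers"
)

func main() {
	opts := new(containers.StatsOptions)
	fmt.Println(opts.Changed("Stream"), opts.GetStream()) // false false: never set

	opts.WithStream(false) // explicitly request a one-shot stats read
	fmt.Println(opts.Changed("Stream"), opts.GetStream()) // true false: set to the zero value
}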
+package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *StopOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *StopOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithIgnore set field Ignore to given value +func (o *StopOptions) WithIgnore(value bool) *StopOptions { + o.Ignore = &value + return o +} + +// GetIgnore returns value of field Ignore +func (o *StopOptions) GetIgnore() bool { + if o.Ignore == nil { + var z bool + return z + } + return *o.Ignore +} + +// WithTimeout set field Timeout to given value +func (o *StopOptions) WithTimeout(value uint) *StopOptions { + o.Timeout = &value + return o +} + +// GetTimeout returns value of field Timeout +func (o *StopOptions) GetTimeout() uint { + if o.Timeout == nil { + var z uint + return z + } + return *o.Timeout +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_top_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_top_options.go new file mode 100644 index 00000000000..61d37ed0d68 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_top_options.go @@ -0,0 +1,33 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *TopOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *TopOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithDescriptors set field Descriptors to given value +func (o *TopOptions) WithDescriptors(value []string) *TopOptions { + o.Descriptors = &value + return o +} + +// GetDescriptors returns value of field Descriptors +func (o *TopOptions) GetDescriptors() []string { + if o.Descriptors == nil { + var z []string + return z + } + return *o.Descriptors +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_unmount_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_unmount_options.go new file mode 100644 index 00000000000..0faa405c4e1 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_unmount_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *UnmountOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *UnmountOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_unpause_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_unpause_options.go new file mode 100644 index 00000000000..4a967862edb --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_unpause_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. 
+package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *UnpauseOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *UnpauseOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_wait_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_wait_options.go new file mode 100644 index 00000000000..e74c0782129 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_wait_options.go @@ -0,0 +1,49 @@ +// Code generated by go generate; DO NOT EDIT. +package containers + +import ( + "net/url" + + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *WaitOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *WaitOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithCondition set field Condition to given value +func (o *WaitOptions) WithCondition(value []define.ContainerStatus) *WaitOptions { + o.Condition = value + return o +} + +// GetCondition returns value of field Condition +func (o *WaitOptions) GetCondition() []define.ContainerStatus { + if o.Condition == nil { + var z []define.ContainerStatus + return z + } + return o.Condition +} + +// WithInterval set field Interval to given value +func (o *WaitOptions) WithInterval(value string) *WaitOptions { + o.Interval = &value + return o +} + +// GetInterval returns value of field Interval +func (o *WaitOptions) GetInterval() string { + if o.Interval == nil { + var z string + return z + } + return *o.Interval +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/doc.go b/vendor/github.com/containers/podman/v4/pkg/bindings/doc.go new file mode 100644 index 00000000000..20b4c42e94c --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/doc.go @@ -0,0 +1,5 @@ +package bindings + +/* + See https://github.com/containers/podman/blob/main/pkg/bindings/README.md for details. 
+*/ diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/errors.go b/vendor/github.com/containers/podman/v4/pkg/bindings/errors.go new file mode 100644 index 00000000000..eb95764ba38 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/errors.go @@ -0,0 +1,59 @@ +package bindings + +import ( + "encoding/json" + "io/ioutil" + + "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/pkg/errors" +) + +var ( + ErrNotImplemented = errors.New("function not implemented") +) + +func handleError(data []byte, unmarshalErrorInto interface{}) error { + if err := json.Unmarshal(data, unmarshalErrorInto); err != nil { + return err + } + return unmarshalErrorInto.(error) +} + +// Process drains the response body, and processes the HTTP status code +// Note: Closing the response.Body is left to the caller +func (h APIResponse) Process(unmarshalInto interface{}) error { + return h.ProcessWithError(unmarshalInto, &errorhandling.ErrorModel{}) +} + +// ProcessWithError drains the response body, and processes the HTTP status code +// Note: Closing the response.Body is left to the caller +func (h APIResponse) ProcessWithError(unmarshalInto interface{}, unmarshalErrorInto interface{}) error { + data, err := ioutil.ReadAll(h.Response.Body) + if err != nil { + return errors.Wrap(err, "unable to process API response") + } + if h.IsSuccess() || h.IsRedirection() { + if unmarshalInto != nil { + return json.Unmarshal(data, unmarshalInto) + } + return nil + } + + if h.IsConflictError() { + return handleError(data, unmarshalErrorInto) + } + + // TODO should we add a debug here with the response code? + return handleError(data, &errorhandling.ErrorModel{}) +} + +func CheckResponseCode(inError error) (int, error) { + switch e := inError.(type) { + case *errorhandling.ErrorModel: + return e.Code(), nil + case *errorhandling.PodConflictErrorModel: + return e.Code(), nil + default: + return -1, errors.New("is not type ErrorModel") + } +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go new file mode 100644 index 00000000000..51dcd2aa5ee --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go @@ -0,0 +1,721 @@ +package images + +import ( + "archive/tar" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "io/fs" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/containers/buildah/define" + "github.com/containers/image/v5/types" + "github.com/containers/podman/v4/pkg/auth" + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/storage/pkg/fileutils" + "github.com/containers/storage/pkg/ioutils" + "github.com/docker/go-units" + "github.com/hashicorp/go-multierror" + jsoniter "github.com/json-iterator/go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type devino struct { + Dev uint64 + Ino uint64 +} + +var ( + iidRegex = regexp.MustCompile(`^[0-9a-f]{12}`) +) + +// Build creates an image using a containerfile reference +func Build(ctx context.Context, containerFiles []string, options entities.BuildOptions) (*entities.BuildReport, error) { + if options.CommonBuildOpts == nil { + options.CommonBuildOpts = new(define.CommonBuildOptions) + } + + params := url.Values{} + + if caps := options.AddCapabilities; len(caps) > 0 { + c, err := jsoniter.MarshalToString(caps) + if err != nil 
{ + return nil, err + } + params.Add("addcaps", c) + } + + if annotations := options.Annotations; len(annotations) > 0 { + l, err := jsoniter.MarshalToString(annotations) + if err != nil { + return nil, err + } + params.Set("annotations", l) + } + + if options.AllPlatforms { + params.Add("allplatforms", "1") + } + + params.Add("t", options.Output) + for _, tag := range options.AdditionalTags { + params.Add("t", tag) + } + if buildArgs := options.Args; len(buildArgs) > 0 { + bArgs, err := jsoniter.MarshalToString(buildArgs) + if err != nil { + return nil, err + } + params.Set("buildargs", bArgs) + } + if excludes := options.Excludes; len(excludes) > 0 { + bArgs, err := jsoniter.MarshalToString(excludes) + if err != nil { + return nil, err + } + params.Set("excludes", bArgs) + } + if cpuPeriod := options.CommonBuildOpts.CPUPeriod; cpuPeriod > 0 { + params.Set("cpuperiod", strconv.Itoa(int(cpuPeriod))) + } + if cpuQuota := options.CommonBuildOpts.CPUQuota; cpuQuota > 0 { + params.Set("cpuquota", strconv.Itoa(int(cpuQuota))) + } + if cpuSetCpus := options.CommonBuildOpts.CPUSetCPUs; len(cpuSetCpus) > 0 { + params.Set("cpusetcpus", cpuSetCpus) + } + if cpuSetMems := options.CommonBuildOpts.CPUSetMems; len(cpuSetMems) > 0 { + params.Set("cpusetmems", cpuSetMems) + } + if cpuShares := options.CommonBuildOpts.CPUShares; cpuShares > 0 { + params.Set("cpushares", strconv.Itoa(int(cpuShares))) + } + if len(options.CommonBuildOpts.CgroupParent) > 0 { + params.Set("cgroupparent", options.CommonBuildOpts.CgroupParent) + } + + params.Set("networkmode", strconv.Itoa(int(options.ConfigureNetwork))) + params.Set("outputformat", options.OutputFormat) + + if devices := options.Devices; len(devices) > 0 { + d, err := jsoniter.MarshalToString(devices) + if err != nil { + return nil, err + } + params.Add("devices", d) + } + + if dnsservers := options.CommonBuildOpts.DNSServers; len(dnsservers) > 0 { + c, err := jsoniter.MarshalToString(dnsservers) + if err != nil { + return nil, err + } + params.Add("dnsservers", c) + } + if dnsoptions := options.CommonBuildOpts.DNSOptions; len(dnsoptions) > 0 { + c, err := jsoniter.MarshalToString(dnsoptions) + if err != nil { + return nil, err + } + params.Add("dnsoptions", c) + } + if dnssearch := options.CommonBuildOpts.DNSSearch; len(dnssearch) > 0 { + c, err := jsoniter.MarshalToString(dnssearch) + if err != nil { + return nil, err + } + params.Add("dnssearch", c) + } + + if caps := options.DropCapabilities; len(caps) > 0 { + c, err := jsoniter.MarshalToString(caps) + if err != nil { + return nil, err + } + params.Add("dropcaps", c) + } + + if options.ForceRmIntermediateCtrs { + params.Set("forcerm", "1") + } + if options.RemoveIntermediateCtrs { + params.Set("rm", "1") + } else { + params.Set("rm", "0") + } + if len(options.From) > 0 { + params.Set("from", options.From) + } + if options.IgnoreUnrecognizedInstructions { + params.Set("ignore", "1") + } + params.Set("isolation", strconv.Itoa(int(options.Isolation))) + if options.CommonBuildOpts.HTTPProxy { + params.Set("httpproxy", "1") + } + if options.Jobs != nil { + params.Set("jobs", strconv.FormatUint(uint64(*options.Jobs), 10)) + } + if labels := options.Labels; len(labels) > 0 { + l, err := jsoniter.MarshalToString(labels) + if err != nil { + return nil, err + } + params.Set("labels", l) + } + + if opt := options.CommonBuildOpts.LabelOpts; len(opt) > 0 { + o, err := jsoniter.MarshalToString(opt) + if err != nil { + return nil, err + } + params.Set("labelopts", o) + } + + if 
len(options.CommonBuildOpts.SeccompProfilePath) > 0 { + params.Set("seccomp", options.CommonBuildOpts.SeccompProfilePath) + } + + if len(options.CommonBuildOpts.ApparmorProfile) > 0 { + params.Set("apparmor", options.CommonBuildOpts.ApparmorProfile) + } + + if options.Layers { + params.Set("layers", "1") + } + if options.LogRusage { + params.Set("rusage", "1") + } + if len(options.RusageLogFile) > 0 { + params.Set("rusagelogfile", options.RusageLogFile) + } + if len(options.Manifest) > 0 { + params.Set("manifest", options.Manifest) + } + if memSwap := options.CommonBuildOpts.MemorySwap; memSwap > 0 { + params.Set("memswap", strconv.Itoa(int(memSwap))) + } + if mem := options.CommonBuildOpts.Memory; mem > 0 { + params.Set("memory", strconv.Itoa(int(mem))) + } + if options.NoCache { + params.Set("nocache", "1") + } + if t := options.Output; len(t) > 0 { + params.Set("output", t) + } + if t := options.OSVersion; len(t) > 0 { + params.Set("osversion", t) + } + for _, t := range options.OSFeatures { + params.Set("osfeature", t) + } + var platform string + if len(options.OS) > 0 { + platform = options.OS + } + if len(options.Architecture) > 0 { + if len(platform) == 0 { + platform = "linux" + } + platform += "/" + options.Architecture + } else if len(platform) > 0 { + platform += "/" + runtime.GOARCH + } + if len(platform) > 0 { + params.Set("platform", platform) + } + if len(options.Platforms) > 0 { + params.Del("platform") + for _, platformSpec := range options.Platforms { + platform = platformSpec.OS + "/" + platformSpec.Arch + if platformSpec.Variant != "" { + platform += "/" + platformSpec.Variant + } + params.Add("platform", platform) + } + } + var err error + var contextDir string + if contextDir, err = filepath.EvalSymlinks(options.ContextDirectory); err == nil { + options.ContextDirectory = contextDir + } + + params.Set("pullpolicy", options.PullPolicy.String()) + + switch options.CommonBuildOpts.IdentityLabel { + case types.OptionalBoolTrue: + params.Set("identitylabel", "1") + case types.OptionalBoolFalse: + params.Set("identitylabel", "0") + } + if options.Quiet { + params.Set("q", "1") + } + if options.RemoveIntermediateCtrs { + params.Set("rm", "1") + } + if len(options.Target) > 0 { + params.Set("target", options.Target) + } + + if hosts := options.CommonBuildOpts.AddHost; len(hosts) > 0 { + h, err := jsoniter.MarshalToString(hosts) + if err != nil { + return nil, err + } + params.Set("extrahosts", h) + } + if nsoptions := options.NamespaceOptions; len(nsoptions) > 0 { + ns, err := jsoniter.MarshalToString(nsoptions) + if err != nil { + return nil, err + } + params.Set("nsoptions", ns) + } + if shmSize := options.CommonBuildOpts.ShmSize; len(shmSize) > 0 { + shmBytes, err := units.RAMInBytes(shmSize) + if err != nil { + return nil, err + } + params.Set("shmsize", strconv.Itoa(int(shmBytes))) + } + if options.Squash { + params.Set("squash", "1") + } + + if options.Timestamp != nil { + t := *options.Timestamp + params.Set("timestamp", strconv.FormatInt(t.Unix(), 10)) + } + + if len(options.CommonBuildOpts.Ulimit) > 0 { + ulimitsJSON, err := json.Marshal(options.CommonBuildOpts.Ulimit) + if err != nil { + return nil, err + } + params.Set("ulimits", string(ulimitsJSON)) + } + + for _, env := range options.Envs { + params.Add("setenv", env) + } + + for _, uenv := range options.UnsetEnvs { + params.Add("unsetenv", uenv) + } + + var ( + headers http.Header + ) + if options.SystemContext != nil { + if options.SystemContext.DockerAuthConfig != nil { + headers, err = 
auth.MakeXRegistryAuthHeader(options.SystemContext, options.SystemContext.DockerAuthConfig.Username, options.SystemContext.DockerAuthConfig.Password) + } else { + headers, err = auth.MakeXRegistryConfigHeader(options.SystemContext, "", "") + } + if options.SystemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue { + params.Set("tlsVerify", "false") + } + } + if err != nil { + return nil, err + } + + stdout := io.Writer(os.Stdout) + if options.Out != nil { + stdout = options.Out + } + + excludes := options.Excludes + if len(excludes) == 0 { + excludes, err = parseDockerignore(options.ContextDirectory) + if err != nil { + return nil, err + } + } + + contextDir, err = filepath.Abs(options.ContextDirectory) + if err != nil { + logrus.Errorf("Cannot find absolute path of %v: %v", options.ContextDirectory, err) + return nil, err + } + + tarContent := []string{options.ContextDirectory} + newContainerFiles := []string{} // dockerfile paths, relative to context dir, ToSlash()ed + + dontexcludes := []string{"!Dockerfile", "!Containerfile", "!.dockerignore", "!.containerignore"} + for _, c := range containerFiles { + if c == "/dev/stdin" { + content, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return nil, err + } + tmpFile, err := ioutil.TempFile("", "build") + if err != nil { + return nil, err + } + defer os.Remove(tmpFile.Name()) // clean up + defer tmpFile.Close() + if _, err := tmpFile.Write(content); err != nil { + return nil, err + } + c = tmpFile.Name() + } + c = filepath.Clean(c) + cfDir := filepath.Dir(c) + if absDir, err := filepath.EvalSymlinks(cfDir); err == nil { + name := filepath.ToSlash(strings.TrimPrefix(c, cfDir+string(filepath.Separator))) + c = filepath.Join(absDir, name) + } + + containerfile, err := filepath.Abs(c) + if err != nil { + logrus.Errorf("Cannot find absolute path of %v: %v", c, err) + return nil, err + } + + // Check if Containerfile is in the context directory, if so truncate the context directory off path + // Do NOT add to tarfile + if strings.HasPrefix(containerfile, contextDir+string(filepath.Separator)) { + containerfile = strings.TrimPrefix(containerfile, contextDir+string(filepath.Separator)) + dontexcludes = append(dontexcludes, "!"+containerfile) + } else { + // If Containerfile does not exist, assume it is in context directory and do Not add to tarfile + if _, err := os.Lstat(containerfile); err != nil { + if !os.IsNotExist(err) { + return nil, err + } + containerfile = c + } else { + // If Containerfile does exist and not in the context directory, add it to the tarfile + tarContent = append(tarContent, containerfile) + } + } + newContainerFiles = append(newContainerFiles, filepath.ToSlash(containerfile)) + } + if len(newContainerFiles) > 0 { + cFileJSON, err := json.Marshal(newContainerFiles) + if err != nil { + return nil, err + } + params.Set("dockerfile", string(cFileJSON)) + } + + // build secrets are usually absolute host path or relative to context dir on host + // in any case move secret to current context and ship the tar. 
+ if secrets := options.CommonBuildOpts.Secrets; len(secrets) > 0 { + secretsForRemote := []string{} + + for _, secret := range secrets { + secretOpt := strings.Split(secret, ",") + if len(secretOpt) > 0 { + modifiedOpt := []string{} + for _, token := range secretOpt { + arr := strings.SplitN(token, "=", 2) + if len(arr) > 1 { + if arr[0] == "src" { + // read specified secret into a tmp file + // move tmp file to tar and change secret source to relative tmp file + tmpSecretFile, err := ioutil.TempFile(options.ContextDirectory, "podman-build-secret") + if err != nil { + return nil, err + } + defer os.Remove(tmpSecretFile.Name()) // clean up + defer tmpSecretFile.Close() + srcSecretFile, err := os.Open(arr[1]) + if err != nil { + return nil, err + } + defer srcSecretFile.Close() + _, err = io.Copy(tmpSecretFile, srcSecretFile) + if err != nil { + return nil, err + } + + // add tmp file to context dir + tarContent = append(tarContent, tmpSecretFile.Name()) + + modifiedSrc := fmt.Sprintf("src=%s", filepath.Base(tmpSecretFile.Name())) + modifiedOpt = append(modifiedOpt, modifiedSrc) + } else { + modifiedOpt = append(modifiedOpt, token) + } + } + } + secretsForRemote = append(secretsForRemote, strings.Join(modifiedOpt, ",")) + } + } + + c, err := jsoniter.MarshalToString(secretsForRemote) + if err != nil { + return nil, err + } + params.Add("secrets", c) + } + + tarfile, err := nTar(append(excludes, dontexcludes...), tarContent...) + if err != nil { + logrus.Errorf("Cannot tar container entries %v error: %v", tarContent, err) + return nil, err + } + defer func() { + if err := tarfile.Close(); err != nil { + logrus.Errorf("%v\n", err) + } + }() + + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, tarfile, http.MethodPost, "/build", params, headers) + if err != nil { + return nil, err + } + defer response.Body.Close() + + if !response.IsSuccess() { + return nil, response.Process(err) + } + + body := response.Body.(io.Reader) + if logrus.IsLevelEnabled(logrus.DebugLevel) { + if v, found := os.LookupEnv("PODMAN_RETAIN_BUILD_ARTIFACT"); found { + if keep, _ := strconv.ParseBool(v); keep { + t, _ := ioutil.TempFile("", "build_*_client") + defer t.Close() + body = io.TeeReader(response.Body, t) + } + } + } + + dec := json.NewDecoder(body) + + var id string + for { + var s struct { + Stream string `json:"stream,omitempty"` + Error string `json:"error,omitempty"` + } + + select { + // FIXME(vrothberg): it seems we always hit the EOF case below, + // even when the server quit but it seems desirable to + // distinguish a proper build from a transient EOF. + case <-response.Request.Context().Done(): + return &entities.BuildReport{ID: id}, nil + default: + // non-blocking select + } + + if err := dec.Decode(&s); err != nil { + if errors.Is(err, io.ErrUnexpectedEOF) { + return nil, errors.Wrap(err, "server probably quit") + } + // EOF means the stream is over in which case we need + // to have read the id. + if errors.Is(err, io.EOF) && id != "" { + break + } + return &entities.BuildReport{ID: id}, errors.Wrap(err, "decoding stream") + } + + switch { + case s.Stream != "": + raw := []byte(s.Stream) + stdout.Write(raw) + if iidRegex.Match(raw) { + id = strings.TrimSuffix(s.Stream, "\n") + } + case s.Error != "": + // If there's an error, return directly. The stream + // will be closed on return. 
+ return &entities.BuildReport{ID: id}, errors.New(s.Error) + default: + return &entities.BuildReport{ID: id}, errors.New("failed to parse build results stream, unexpected input") + } + } + return &entities.BuildReport{ID: id}, nil +} + +func nTar(excludes []string, sources ...string) (io.ReadCloser, error) { + pm, err := fileutils.NewPatternMatcher(excludes) + if err != nil { + return nil, errors.Wrapf(err, "error processing excludes list %v", excludes) + } + + if len(sources) == 0 { + return nil, errors.New("No source(s) provided for build") + } + + pr, pw := io.Pipe() + gw := gzip.NewWriter(pw) + tw := tar.NewWriter(gw) + + var merr *multierror.Error + go func() { + defer pw.Close() + defer gw.Close() + defer tw.Close() + seen := make(map[devino]string) + for _, src := range sources { + s, err := filepath.Abs(src) + if err != nil { + logrus.Errorf("Cannot stat one of source context: %v", err) + merr = multierror.Append(merr, err) + return + } + err = filepath.WalkDir(s, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + // check if what we are given is an empty dir, if so then continue w/ it. Else return. + // if we are given a file or a symlink, we do not want to exclude it. + if d.IsDir() && s == path { + var p *os.File + p, err = os.Open(path) + if err != nil { + return err + } + defer p.Close() + _, err = p.Readdir(1) + if err != io.EOF { + return nil // non empty root dir, need to return + } else if err != nil { + logrus.Errorf("While reading directory %v: %v", path, err) + } + } + name := filepath.ToSlash(strings.TrimPrefix(path, s+string(filepath.Separator))) + + excluded, err := pm.Matches(name) // nolint:staticcheck + if err != nil { + return errors.Wrapf(err, "error checking if %q is excluded", name) + } + if excluded { + // Note: filepath.SkipDir is not possible to use given .dockerignore semantics. + // An exception to exclusions may include an excluded directory, therefore we + // are required to visit all files. 
:( + return nil + } + switch { + case d.Type().IsRegular(): // add file item + info, err := d.Info() + if err != nil { + return err + } + di, isHardLink := checkHardLink(info) + if err != nil { + return err + } + + hdr, err := tar.FileInfoHeader(info, "") + if err != nil { + return err + } + hdr.Uid, hdr.Gid = 0, 0 + orig, ok := seen[di] + if ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = orig + hdr.Size = 0 + hdr.Name = name + return tw.WriteHeader(hdr) + } + f, err := os.Open(path) + if err != nil { + return err + } + + hdr.Name = name + if err := tw.WriteHeader(hdr); err != nil { + f.Close() + return err + } + + _, err = io.Copy(tw, f) + f.Close() + if err == nil && isHardLink { + seen[di] = name + } + return err + case d.IsDir(): // add folders + info, err := d.Info() + if err != nil { + return err + } + hdr, lerr := tar.FileInfoHeader(info, name) + if lerr != nil { + return lerr + } + hdr.Name = name + hdr.Uid, hdr.Gid = 0, 0 + if lerr := tw.WriteHeader(hdr); lerr != nil { + return lerr + } + case d.Type()&os.ModeSymlink != 0: // add symlinks as it, not content + link, err := os.Readlink(path) + if err != nil { + return err + } + info, err := d.Info() + if err != nil { + return err + } + hdr, lerr := tar.FileInfoHeader(info, link) + if lerr != nil { + return lerr + } + hdr.Name = name + hdr.Uid, hdr.Gid = 0, 0 + if lerr := tw.WriteHeader(hdr); lerr != nil { + return lerr + } + } // skip other than file,folder and symlinks + return nil + }) + merr = multierror.Append(merr, err) + } + }() + rc := ioutils.NewReadCloserWrapper(pr, func() error { + if merr != nil { + merr = multierror.Append(merr, pr.Close()) + return merr.ErrorOrNil() + } + return pr.Close() + }) + return rc, nil +} + +func parseDockerignore(root string) ([]string, error) { + ignore, err := ioutil.ReadFile(filepath.Join(root, ".containerignore")) + if err != nil { + var dockerIgnoreErr error + ignore, dockerIgnoreErr = ioutil.ReadFile(filepath.Join(root, ".dockerignore")) + if dockerIgnoreErr != nil && !os.IsNotExist(dockerIgnoreErr) { + return nil, errors.Wrapf(err, "error reading .containerignore: '%s'", root) + } + } + rawexcludes := strings.Split(string(ignore), "\n") + excludes := make([]string, 0, len(rawexcludes)) + for _, e := range rawexcludes { + if len(e) == 0 || e[0] == '#' { + continue + } + excludes = append(excludes, e) + } + return excludes, nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build_unix.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/build_unix.go new file mode 100644 index 00000000000..32e2ba9af9d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/build_unix.go @@ -0,0 +1,17 @@ +//go:build !windows +// +build !windows + +package images + +import ( + "os" + "syscall" +) + +func checkHardLink(fi os.FileInfo) (devino, bool) { + st := fi.Sys().(*syscall.Stat_t) + return devino{ + Dev: uint64(st.Dev), // nolint: unconvert + Ino: st.Ino, + }, st.Nlink > 1 +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build_windows.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/build_windows.go new file mode 100644 index 00000000000..bd71d1bf0da --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/build_windows.go @@ -0,0 +1,9 @@ +package images + +import ( + "os" +) + +func checkHardLink(fi os.FileInfo) (devino, bool) { + return devino{}, false +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/diff.go 
b/vendor/github.com/containers/podman/v4/pkg/bindings/images/diff.go new file mode 100644 index 00000000000..2a59e6d6952 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/diff.go @@ -0,0 +1,30 @@ +package images + +import ( + "context" + "net/http" + + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/storage/pkg/archive" +) + +// Diff provides the changes between two container layers +func Diff(ctx context.Context, nameOrID string, options *DiffOptions) ([]archive.Change, error) { + if options == nil { + options = new(DiffOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/images/%s/changes", nil, nil, nameOrID) + if err != nil { + return nil, err + } + defer response.Body.Close() + + var changes []archive.Change + return changes, response.Process(&changes) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/images.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/images.go new file mode 100644 index 00000000000..8e3b079298f --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/images.go @@ -0,0 +1,350 @@ +package images + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + + imageTypes "github.com/containers/image/v5/types" + "github.com/containers/podman/v4/pkg/api/handlers/types" + "github.com/containers/podman/v4/pkg/auth" + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v4/pkg/domain/entities/reports" + "github.com/pkg/errors" +) + +// Exists a lightweight way to determine if an image exists in local storage. It returns a +// boolean response. +func Exists(ctx context.Context, nameOrID string, options *ExistsOptions) (bool, error) { + conn, err := bindings.GetClient(ctx) + if err != nil { + return false, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/images/%s/exists", nil, nil, nameOrID) + if err != nil { + return false, err + } + defer response.Body.Close() + + return response.IsSuccess(), nil +} + +// List returns a list of images in local storage. The all boolean and filters parameters are optional +// ways to alter the image query. +func List(ctx context.Context, options *ListOptions) ([]*entities.ImageSummary, error) { + if options == nil { + options = new(ListOptions) + } + var imageSummary []*entities.ImageSummary + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/images/json", params, nil) + if err != nil { + return imageSummary, err + } + defer response.Body.Close() + + return imageSummary, response.Process(&imageSummary) +} + +// Get performs an image inspect. To have the on-disk size of the image calculated, you can +// use the optional size parameter. 
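The inspect-style bindings in this file all follow the request shape visible in Diff above: resolve the client out of the context, build query parameters from the options, DoRequest, then Process the body into a typed report. A sketch combining List with the GetImage function defined next; it assumes a ctx prepared with the bindings connection helper, the WithAll/WithSize accessors produced by the //go:generate directives in types.go further down, and the ID/Size fields on the entities report types:

package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings/images"
)

func printImageSizes(ctx context.Context) error {
	// Sketch only: list every image, then inspect each one with
	// Size=true so the server computes the on-disk size.
	summaries, err := images.List(ctx, new(images.ListOptions).WithAll(true))
	if err != nil {
		return err
	}
	for _, s := range summaries {
		report, err := images.GetImage(ctx, s.ID, new(images.GetOptions).WithSize(true))
		if err != nil {
			return err
		}
		fmt.Printf("%s %d\n", s.ID, report.Size)
	}
	return nil
}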
+func GetImage(ctx context.Context, nameOrID string, options *GetOptions) (*entities.ImageInspectReport, error) { + if options == nil { + options = new(GetOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + inspectedData := entities.ImageInspectReport{} + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/images/%s/json", params, nil, nameOrID) + if err != nil { + return &inspectedData, err + } + defer response.Body.Close() + + return &inspectedData, response.Process(&inspectedData) +} + +// Tree retrieves a "tree" based representation of the given image +func Tree(ctx context.Context, nameOrID string, options *TreeOptions) (*entities.ImageTreeReport, error) { + if options == nil { + options = new(TreeOptions) + } + var report entities.ImageTreeReport + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/images/%s/tree", params, nil, nameOrID) + if err != nil { + return nil, err + } + defer response.Body.Close() + + return &report, response.Process(&report) +} + +// History returns the parent layers of an image. +func History(ctx context.Context, nameOrID string, options *HistoryOptions) ([]*types.HistoryResponse, error) { + if options == nil { + options = new(HistoryOptions) + } + _ = options + var history []*types.HistoryResponse + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/images/%s/history", nil, nil, nameOrID) + if err != nil { + return history, err + } + defer response.Body.Close() + + return history, response.Process(&history) +} + +func Load(ctx context.Context, r io.Reader) (*entities.ImageLoadReport, error) { + var report entities.ImageLoadReport + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, r, http.MethodPost, "/images/load", nil, nil) + if err != nil { + return nil, err + } + defer response.Body.Close() + + return &report, response.Process(&report) +} + +// Export saves images from local storage as a tarball or image archive. The optional format +// parameter is used to change the format of the output. +func Export(ctx context.Context, nameOrIDs []string, w io.Writer, options *ExportOptions) error { + if options == nil { + options = new(ExportOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + params, err := options.ToParams() + if err != nil { + return err + } + for _, ref := range nameOrIDs { + params.Add("references", ref) + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/images/export", params, nil) + if err != nil { + return err + } + defer response.Body.Close() + + if response.StatusCode/100 == 2 || response.StatusCode/100 == 3 { + _, err = io.Copy(w, response.Body) + return err + } + return response.Process(nil) +} + +// Prune removes unused images from local storage. The optional filters can be used to further +// define which images should be pruned. 
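The Prune binding that follows takes the same options pattern; the Filters map is serialized into the query by ToParams. A sketch, assuming the WithAll/WithFilters accessors generated for images.PruneOptions and the Id/Size fields of reports.PruneReport (the "until" filter value is illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v4/pkg/bindings/images"
)

func pruneOld(ctx context.Context) error {
	opts := new(images.PruneOptions).
		WithAll(true).
		WithFilters(map[string][]string{"until": {"24h"}}) // illustrative filter
	pruned, err := images.Prune(ctx, opts)
	if err != nil {
		return err
	}
	for _, r := range pruned {
		fmt.Println(r.Id, r.Size)
	}
	return nil
}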
+func Prune(ctx context.Context, options *PruneOptions) ([]*reports.PruneReport, error) { + var ( + deleted []*reports.PruneReport + ) + if options == nil { + options = new(PruneOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/images/prune", params, nil) + if err != nil { + return deleted, err + } + defer response.Body.Close() + + return deleted, response.Process(&deleted) +} + +// Tag adds an additional name to locally-stored image. Both the tag and repo parameters are required. +func Tag(ctx context.Context, nameOrID, tag, repo string, options *TagOptions) error { + if options == nil { + options = new(TagOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + params := url.Values{} + params.Set("tag", tag) + params.Set("repo", repo) + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/images/%s/tag", params, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + return response.Process(nil) +} + +// Untag removes a name from locally-stored image. Both the tag and repo parameters are required. +func Untag(ctx context.Context, nameOrID, tag, repo string, options *UntagOptions) error { + if options == nil { + options = new(UntagOptions) + } + _ = options + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + params := url.Values{} + params.Set("tag", tag) + params.Set("repo", repo) + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/images/%s/untag", params, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + return response.Process(nil) +} + +// Import adds the given image to the local image store. This can be done by file and the given reader +// or via the url parameter. Additional metadata can be associated with the image by using the changes and +// message parameters. The image can also be tagged given a reference. One of url OR r must be provided. +func Import(ctx context.Context, r io.Reader, options *ImportOptions) (*entities.ImageImportReport, error) { + if options == nil { + options = new(ImportOptions) + } + var report entities.ImageImportReport + if r != nil && options.URL != nil { + return nil, errors.New("url and r parameters cannot be used together") + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, r, http.MethodPost, "/images/import", params, nil) + if err != nil { + return nil, err + } + defer response.Body.Close() + + return &report, response.Process(&report) +} + +// Push is the binding for libpod's v2 endpoints for push images. Note that +// `source` must be a referring to an image in the remote's container storage. +// The destination must be a reference to a registry (i.e., of docker transport +// or be normalized to one). Other transports are rejected as they do not make +// sense in a remote context. 
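Push, defined next, deliberately rewrites the client-side SkipTLSVerify option into the wire-level tlsVerify parameter with the boolean flipped, so callers only ever deal with one name. A sketch with placeholder credentials and registry (the WithX setters are the generated counterparts of the GetX accessors Push itself calls):

package main

import (
	"context"

	"github.com/containers/podman/v4/pkg/bindings/images"
)

func pushExample(ctx context.Context) error {
	// Placeholder destination and credentials; SkipTLSVerify(true)
	// becomes tlsVerify=false on the wire (see the function body below).
	opts := new(images.PushOptions).
		WithUsername("user").
		WithPassword("secret").
		WithSkipTLSVerify(true)
	return images.Push(ctx, "localhost/myimage:latest", "registry.example.com/org/myimage:latest", opts)
}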
+func Push(ctx context.Context, source string, destination string, options *PushOptions) error { + if options == nil { + options = new(PushOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + // TODO: have a global system context we can pass around (1st argument) + header, err := auth.MakeXRegistryAuthHeader(&imageTypes.SystemContext{AuthFilePath: options.GetAuthfile()}, options.GetUsername(), options.GetPassword()) + if err != nil { + return err + } + + params, err := options.ToParams() + if err != nil { + return err + } + // SkipTLSVerify is special. We need to delete the param added by + // toparams and change the key and flip the bool + if options.SkipTLSVerify != nil { + params.Del("SkipTLSVerify") + params.Set("tlsVerify", strconv.FormatBool(!options.GetSkipTLSVerify())) + } + params.Set("destination", destination) + + path := fmt.Sprintf("/images/%s/push", source) + response, err := conn.DoRequest(ctx, nil, http.MethodPost, path, params, header) + if err != nil { + return err + } + defer response.Body.Close() + + return response.Process(err) +} + +// Search is the binding for libpod's v2 endpoints for Search images. +func Search(ctx context.Context, term string, options *SearchOptions) ([]entities.ImageSearchReport, error) { + if options == nil { + options = new(SearchOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + params.Set("term", term) + + // Note: we have to verify if skipped is false. + if options.SkipTLSVerify != nil { + params.Del("SkipTLSVerify") + params.Set("tlsVerify", strconv.FormatBool(!options.GetSkipTLSVerify())) + } + + // TODO: have a global system context we can pass around (1st argument) + header, err := auth.MakeXRegistryAuthHeader(&imageTypes.SystemContext{AuthFilePath: options.GetAuthfile()}, "", "") + if err != nil { + return nil, err + } + + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/images/search", params, header) + if err != nil { + return nil, err + } + defer response.Body.Close() + + results := []entities.ImageSearchReport{} + if err := response.Process(&results); err != nil { + return nil, err + } + + return results, nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/pull.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/pull.go new file mode 100644 index 00000000000..20e47179ce6 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/pull.go @@ -0,0 +1,99 @@ +package images + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "strconv" + + "github.com/containers/image/v5/types" + "github.com/containers/podman/v4/pkg/auth" + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/pkg/errors" +) + +// Pull is the binding for libpod's v2 endpoints for pulling images. Note that +// `rawImage` must be a reference to a registry (i.e., of docker transport or be +// normalized to one). Other transports are rejected as they do not make sense +// in a remote context. 
Progress reported on stderr +func Pull(ctx context.Context, rawImage string, options *PullOptions) ([]string, error) { + if options == nil { + options = new(PullOptions) + } + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + params.Set("reference", rawImage) + + if options.SkipTLSVerify != nil { + params.Del("SkipTLSVerify") + // Note: we have to verify if skipped is false. + params.Set("tlsVerify", strconv.FormatBool(!options.GetSkipTLSVerify())) + } + + // TODO: have a global system context we can pass around (1st argument) + header, err := auth.MakeXRegistryAuthHeader(&types.SystemContext{AuthFilePath: options.GetAuthfile()}, options.GetUsername(), options.GetPassword()) + if err != nil { + return nil, err + } + + response, err := conn.DoRequest(ctx, nil, http.MethodPost, "/images/pull", params, header) + if err != nil { + return nil, err + } + defer response.Body.Close() + + if !response.IsSuccess() { + return nil, response.Process(err) + } + + // Historically pull writes status to stderr + stderr := io.Writer(os.Stderr) + if options.GetQuiet() { + stderr = ioutil.Discard + } + + dec := json.NewDecoder(response.Body) + var images []string + var pullErrors []error + for { + var report entities.ImagePullReport + if err := dec.Decode(&report); err != nil { + if errors.Is(err, io.EOF) { + break + } + report.Error = err.Error() + "\n" + } + + select { + case <-response.Request.Context().Done(): + break + default: + // non-blocking select + } + + switch { + case report.Stream != "": + fmt.Fprint(stderr, report.Stream) + case report.Error != "": + pullErrors = append(pullErrors, errors.New(report.Error)) + case len(report.Images) > 0: + images = report.Images + case report.ID != "": + default: + return images, errors.Errorf("failed to parse pull results stream, unexpected input: %v", report) + } + } + return images, errorhandling.JoinErrors(pullErrors) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/rm.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/rm.go new file mode 100644 index 00000000000..b80bacf4539 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/rm.go @@ -0,0 +1,46 @@ +package images + +import ( + "context" + "net/http" + + "github.com/containers/podman/v4/pkg/api/handlers/types" + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v4/pkg/errorhandling" +) + +// Remove removes one or more images from the local storage. Use optional force option to remove an +// image, even if it's used by containers. +func Remove(ctx context.Context, images []string, options *RemoveOptions) (*entities.ImageRemoveReport, []error) { + if options == nil { + options = new(RemoveOptions) + } + // FIXME - bindings tests are missing for this endpoint. Once the CI is + // re-enabled for bindings, we need to add them. At the time of writing, + // the tests don't compile. 
+ var report types.LibpodImagesRemoveReport + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, []error{err} + } + + params, err := options.ToParams() + if err != nil { + return nil, nil + } + for _, image := range images { + params.Add("images", image) + } + response, err := conn.DoRequest(ctx, nil, http.MethodDelete, "/images/remove", params, nil) + if err != nil { + return nil, []error{err} + } + defer response.Body.Close() + + if err := response.Process(&report); err != nil { + return nil, []error{err} + } + + return &report.ImageRemoveReport, errorhandling.StringsToErrors(report.Errors) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go new file mode 100644 index 00000000000..8e5e7ee929e --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go @@ -0,0 +1,188 @@ +package images + +import ( + buildahDefine "github.com/containers/buildah/define" +) + +//go:generate go run ../generator/generator.go RemoveOptions +// RemoveOptions are optional options for image removal +type RemoveOptions struct { + // All removes all images + All *bool + // Forces removes all containers based on the image + Force *bool + // Ignore if a specified image does not exist and do not throw an error. + Ignore *bool +} + +//go:generate go run ../generator/generator.go DiffOptions +// DiffOptions are optional options image diffs +type DiffOptions struct { + // By the default diff will compare against the parent layer. Change the Parent if you want to compare against something else. + Parent *string + // Change the type the backend should match. This can be set to "all", "container" or "image". + DiffType *string +} + +//go:generate go run ../generator/generator.go ListOptions +// ListOptions are optional options for listing images +type ListOptions struct { + // All lists all image in the image store including dangling images + All *bool + // filters that can be used to get a more specific list of images + Filters map[string][]string +} + +//go:generate go run ../generator/generator.go GetOptions +// GetOptions are optional options for inspecting an image +type GetOptions struct { + // Size computes the amount of storage the image consumes + Size *bool +} + +//go:generate go run ../generator/generator.go TreeOptions +// TreeOptions are optional options for a tree-based representation +// of the image +type TreeOptions struct { + // WhatRequires ... + WhatRequires *bool +} + +//go:generate go run ../generator/generator.go HistoryOptions +// HistoryOptions are optional options image history +type HistoryOptions struct { +} + +//go:generate go run ../generator/generator.go LoadOptions +// LoadOptions are optional options for loading an image +type LoadOptions struct { + // Reference is the name of the loaded image + Reference *string +} + +//go:generate go run ../generator/generator.go ExportOptions +// ExportOptions are optional options for exporting images +type ExportOptions struct { + // Compress the image + Compress *bool + // Format of the output + Format *string + // Accept uncompressed layers when copying OCI images. 
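Remove above returns a report plus a slice of errors rather than a single error; note from the body that a ToParams failure yields nil for both, so callers cannot assume a non-nil report just because the error slice is empty. A usage sketch (image name is a placeholder; imports as in the earlier Pull sketch, plus pkg/errorhandling):

func removeDemo(ctx context.Context) error {
    report, errs := images.Remove(ctx, []string{"quay.io/libpod/alpine:latest"},
        new(images.RemoveOptions).WithForce(true)) // force: remove despite containers
    if err := errorhandling.JoinErrors(errs); err != nil {
        return err
    }
    if report != nil { // may be nil when parameter encoding failed (see above)
        fmt.Printf("%+v\n", report)
    }
    return nil
}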
+ OciAcceptUncompressedLayers *bool +} + +//go:generate go run ../generator/generator.go PruneOptions +// PruneOptions are optional options for pruning images +type PruneOptions struct { + // Prune all images + All *bool + // Prune images even when they're used by external containers + External *bool + // Filters to apply when pruning images + Filters map[string][]string +} + +//go:generate go run ../generator/generator.go TagOptions +// TagOptions are optional options for tagging images +type TagOptions struct { +} + +//go:generate go run ../generator/generator.go UntagOptions +// UntagOptions are optional options for untagging images +type UntagOptions struct { +} + +//go:generate go run ../generator/generator.go ImportOptions +// ImportOptions are optional options for importing images +type ImportOptions struct { + // Changes to be applied to the image + Changes *[]string + // Message to be applied to the image + Message *string + // Reference is a tag to be applied to the image + Reference *string + // Url to option image to import. Cannot be used with the reader + URL *string + // OS for the imported image + OS *string + // Architecture for the imported image + Architecture *string + // Variant for the imported image + Variant *string +} + +//go:generate go run ../generator/generator.go PushOptions +// PushOptions are optional options for importing images +type PushOptions struct { + // All indicates whether to push all images related to the image list + All *bool + // Authfile is the path to the authentication file. Ignored for remote + // calls. + Authfile *string + // Compress tarball image layers when pushing to a directory using the 'dir' transport. + Compress *bool + // Manifest type of the pushed image + Format *string + // Password for authenticating against the registry. + Password *string + // SkipTLSVerify to skip HTTPS and certificate verification. + SkipTLSVerify *bool + // Username for authenticating against the registry. + Username *string +} + +//go:generate go run ../generator/generator.go SearchOptions +// SearchOptions are optional options for searching images on registries +type SearchOptions struct { + // Authfile is the path to the authentication file. Ignored for remote + // calls. + Authfile *string + // Filters for the search results. + Filters map[string][]string + // Limit the number of results. + Limit *int + // SkipTLSVerify to skip HTTPS and certificate verification. + SkipTLSVerify *bool + // ListTags search the available tags of the repository + ListTags *bool +} + +//go:generate go run ../generator/generator.go PullOptions +// PullOptions are optional options for pulling images +type PullOptions struct { + // AllTags can be specified to pull all tags of an image. Note + // that this only works if the image does not include a tag. + AllTags *bool + // Arch will overwrite the local architecture for image pulls. + Arch *string + // Authfile is the path to the authentication file. Ignored for remote + // calls. + Authfile *string + // OS will overwrite the local operating system (OS) for image + // pulls. + OS *string + // Policy is the pull policy. Supported values are "missing", "never", + // "newer", "always". An empty string defaults to "always". + Policy *string + // Password for authenticating against the registry. + Password *string + // Quiet can be specified to suppress pull progress when pulling. Ignored + // for remote calls. + Quiet *bool + // SkipTLSVerify to skip HTTPS and certificate verification. 
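The Push, Search and Pull bindings earlier in this diff all special-case SkipTLSVerify: the option expresses "skip verification" while the service expects a tlsVerify parameter, so the binding deletes the struct-derived key and sends the inverted value. The flip in isolation (illustrative stdlib-only snippet):

func flipDemo() {
    skip := true // the caller's SkipTLSVerify intent
    params := url.Values{}
    params.Set("SkipTLSVerify", strconv.FormatBool(skip)) // field-shaped key
    params.Del("SkipTLSVerify")                           // dropped by the binding
    params.Set("tlsVerify", strconv.FormatBool(!skip))    // inverted wire parameter
    fmt.Println(params.Encode()) // "tlsVerify=false"
}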
+ SkipTLSVerify *bool + // Username for authenticating against the registry. + Username *string + // Variant will overwrite the local variant for image pulls. + Variant *string +} + +// BuildOptions are optional options for building images +type BuildOptions struct { + buildahDefine.BuildOptions +} + +//go:generate go run ../generator/generator.go ExistsOptions +// ExistsOptions are optional options for checking if an image exists +type ExistsOptions struct { +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_diff_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_diff_options.go new file mode 100644 index 00000000000..3a303215573 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_diff_options.go @@ -0,0 +1,48 @@ +// Code generated by go generate; DO NOT EDIT. +package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *DiffOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *DiffOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithParent set field Parent to given value +func (o *DiffOptions) WithParent(value string) *DiffOptions { + o.Parent = &value + return o +} + +// GetParent returns value of field Parent +func (o *DiffOptions) GetParent() string { + if o.Parent == nil { + var z string + return z + } + return *o.Parent +} + +// WithDiffType set field DiffType to given value +func (o *DiffOptions) WithDiffType(value string) *DiffOptions { + o.DiffType = &value + return o +} + +// GetDiffType returns value of field DiffType +func (o *DiffOptions) GetDiffType() string { + if o.DiffType == nil { + var z string + return z + } + return *o.DiffType +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_exists_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_exists_options.go new file mode 100644 index 00000000000..7521d85f3ac --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_exists_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. +package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ExistsOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ExistsOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_export_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_export_options.go new file mode 100644 index 00000000000..77850734c41 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_export_options.go @@ -0,0 +1,63 @@ +// Code generated by go generate; DO NOT EDIT. 
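Every options struct above uses pointer fields so an explicitly-set zero value can be told apart from an unset field; the generated Changed helper in the files that follow reports exactly that distinction. A small illustration:

func changedDemo() {
    opts := new(images.PullOptions)
    fmt.Println(opts.Changed("Quiet")) // false: field never set
    opts.WithQuiet(false)              // explicitly set to the zero value
    fmt.Println(opts.Changed("Quiet")) // true: the pointer is now non-nil
    fmt.Println(opts.GetQuiet())       // false, but deliberately so
}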
+package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ExportOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ExportOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithCompress set field Compress to given value +func (o *ExportOptions) WithCompress(value bool) *ExportOptions { + o.Compress = &value + return o +} + +// GetCompress returns value of field Compress +func (o *ExportOptions) GetCompress() bool { + if o.Compress == nil { + var z bool + return z + } + return *o.Compress +} + +// WithFormat set field Format to given value +func (o *ExportOptions) WithFormat(value string) *ExportOptions { + o.Format = &value + return o +} + +// GetFormat returns value of field Format +func (o *ExportOptions) GetFormat() string { + if o.Format == nil { + var z string + return z + } + return *o.Format +} + +// WithOciAcceptUncompressedLayers set field OciAcceptUncompressedLayers to given value +func (o *ExportOptions) WithOciAcceptUncompressedLayers(value bool) *ExportOptions { + o.OciAcceptUncompressedLayers = &value + return o +} + +// GetOciAcceptUncompressedLayers returns value of field OciAcceptUncompressedLayers +func (o *ExportOptions) GetOciAcceptUncompressedLayers() bool { + if o.OciAcceptUncompressedLayers == nil { + var z bool + return z + } + return *o.OciAcceptUncompressedLayers +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_get_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_get_options.go new file mode 100644 index 00000000000..36b62272e28 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_get_options.go @@ -0,0 +1,33 @@ +// Code generated by go generate; DO NOT EDIT. +package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *GetOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *GetOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithSize set field Size to given value +func (o *GetOptions) WithSize(value bool) *GetOptions { + o.Size = &value + return o +} + +// GetSize returns value of field Size +func (o *GetOptions) GetSize() bool { + if o.Size == nil { + var z bool + return z + } + return *o.Size +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_history_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_history_options.go new file mode 100644 index 00000000000..a9abb482a64 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_history_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. 
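The Size knob shown above pairs with the image-inspect binding. GetImage and the report's field names are not part of this hunk and are assumed from the podman v4 bindings; treat this sketch accordingly:

func inspectDemo(ctx context.Context) error {
    img, err := images.GetImage(ctx, "quay.io/libpod/alpine:latest", // placeholder
        new(images.GetOptions).WithSize(true)) // ask the service to compute size
    if err != nil {
        return err
    }
    fmt.Println(img.ID, img.Size) // assumed fields on entities.ImageInspectReport
    return nil
}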
+package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *HistoryOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *HistoryOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_import_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_import_options.go new file mode 100644 index 00000000000..f958fe8b480 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_import_options.go @@ -0,0 +1,123 @@ +// Code generated by go generate; DO NOT EDIT. +package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ImportOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ImportOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithChanges set field Changes to given value +func (o *ImportOptions) WithChanges(value []string) *ImportOptions { + o.Changes = &value + return o +} + +// GetChanges returns value of field Changes +func (o *ImportOptions) GetChanges() []string { + if o.Changes == nil { + var z []string + return z + } + return *o.Changes +} + +// WithMessage set field Message to given value +func (o *ImportOptions) WithMessage(value string) *ImportOptions { + o.Message = &value + return o +} + +// GetMessage returns value of field Message +func (o *ImportOptions) GetMessage() string { + if o.Message == nil { + var z string + return z + } + return *o.Message +} + +// WithReference set field Reference to given value +func (o *ImportOptions) WithReference(value string) *ImportOptions { + o.Reference = &value + return o +} + +// GetReference returns value of field Reference +func (o *ImportOptions) GetReference() string { + if o.Reference == nil { + var z string + return z + } + return *o.Reference +} + +// WithURL set field URL to given value +func (o *ImportOptions) WithURL(value string) *ImportOptions { + o.URL = &value + return o +} + +// GetURL returns value of field URL +func (o *ImportOptions) GetURL() string { + if o.URL == nil { + var z string + return z + } + return *o.URL +} + +// WithOS set field OS to given value +func (o *ImportOptions) WithOS(value string) *ImportOptions { + o.OS = &value + return o +} + +// GetOS returns value of field OS +func (o *ImportOptions) GetOS() string { + if o.OS == nil { + var z string + return z + } + return *o.OS +} + +// WithArchitecture set field Architecture to given value +func (o *ImportOptions) WithArchitecture(value string) *ImportOptions { + o.Architecture = &value + return o +} + +// GetArchitecture returns value of field Architecture +func (o *ImportOptions) GetArchitecture() string { + if o.Architecture == nil { + var z string + return z + } + return *o.Architecture +} + +// WithVariant set field Variant to given value +func (o *ImportOptions) WithVariant(value string) *ImportOptions { + o.Variant = &value + return o +} + +// GetVariant returns value of field Variant +func (o *ImportOptions) GetVariant() string { + if o.Variant == nil { + var z string + return z + } + return *o.Variant +} diff --git 
a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_list_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_list_options.go new file mode 100644 index 00000000000..f47cd9c7573 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_list_options.go @@ -0,0 +1,48 @@ +// Code generated by go generate; DO NOT EDIT. +package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ListOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ListOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithAll set field All to given value +func (o *ListOptions) WithAll(value bool) *ListOptions { + o.All = &value + return o +} + +// GetAll returns value of field All +func (o *ListOptions) GetAll() bool { + if o.All == nil { + var z bool + return z + } + return *o.All +} + +// WithFilters set field Filters to given value +func (o *ListOptions) WithFilters(value map[string][]string) *ListOptions { + o.Filters = value + return o +} + +// GetFilters returns value of field Filters +func (o *ListOptions) GetFilters() map[string][]string { + if o.Filters == nil { + var z map[string][]string + return z + } + return o.Filters +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_load_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_load_options.go new file mode 100644 index 00000000000..9978201ce5a --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_load_options.go @@ -0,0 +1,33 @@ +// Code generated by go generate; DO NOT EDIT. +package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *LoadOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *LoadOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithReference set field Reference to given value +func (o *LoadOptions) WithReference(value string) *LoadOptions { + o.Reference = &value + return o +} + +// GetReference returns value of field Reference +func (o *LoadOptions) GetReference() string { + if o.Reference == nil { + var z string + return z + } + return *o.Reference +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_prune_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_prune_options.go new file mode 100644 index 00000000000..eddf1ae453d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_prune_options.go @@ -0,0 +1,63 @@ +// Code generated by go generate; DO NOT EDIT. 
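ListOptions.Filters is a map, which ToParams (see the internal util package later in this diff) serializes as JSON. Assuming the List binding from the same package, which is not shown in this hunk, a dangling-images query would look like:

func danglingDemo(ctx context.Context) error {
    summaries, err := images.List(ctx, // List assumed from the bindings package
        new(images.ListOptions).WithAll(true).WithFilters(map[string][]string{
            "dangling": {"true"},
        }))
    if err != nil {
        return err
    }
    for _, s := range summaries {
        fmt.Println(s.ID)
    }
    return nil
}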
+package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *PruneOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *PruneOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithAll set field All to given value +func (o *PruneOptions) WithAll(value bool) *PruneOptions { + o.All = &value + return o +} + +// GetAll returns value of field All +func (o *PruneOptions) GetAll() bool { + if o.All == nil { + var z bool + return z + } + return *o.All +} + +// WithExternal set field External to given value +func (o *PruneOptions) WithExternal(value bool) *PruneOptions { + o.External = &value + return o +} + +// GetExternal returns value of field External +func (o *PruneOptions) GetExternal() bool { + if o.External == nil { + var z bool + return z + } + return *o.External +} + +// WithFilters set field Filters to given value +func (o *PruneOptions) WithFilters(value map[string][]string) *PruneOptions { + o.Filters = value + return o +} + +// GetFilters returns value of field Filters +func (o *PruneOptions) GetFilters() map[string][]string { + if o.Filters == nil { + var z map[string][]string + return z + } + return o.Filters +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_pull_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_pull_options.go new file mode 100644 index 00000000000..4cd52518584 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_pull_options.go @@ -0,0 +1,168 @@ +// Code generated by go generate; DO NOT EDIT. +package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *PullOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *PullOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithAllTags set field AllTags to given value +func (o *PullOptions) WithAllTags(value bool) *PullOptions { + o.AllTags = &value + return o +} + +// GetAllTags returns value of field AllTags +func (o *PullOptions) GetAllTags() bool { + if o.AllTags == nil { + var z bool + return z + } + return *o.AllTags +} + +// WithArch set field Arch to given value +func (o *PullOptions) WithArch(value string) *PullOptions { + o.Arch = &value + return o +} + +// GetArch returns value of field Arch +func (o *PullOptions) GetArch() string { + if o.Arch == nil { + var z string + return z + } + return *o.Arch +} + +// WithAuthfile set field Authfile to given value +func (o *PullOptions) WithAuthfile(value string) *PullOptions { + o.Authfile = &value + return o +} + +// GetAuthfile returns value of field Authfile +func (o *PullOptions) GetAuthfile() string { + if o.Authfile == nil { + var z string + return z + } + return *o.Authfile +} + +// WithOS set field OS to given value +func (o *PullOptions) WithOS(value string) *PullOptions { + o.OS = &value + return o +} + +// GetOS returns value of field OS +func (o *PullOptions) GetOS() string { + if o.OS == nil { + var z string + return z + } + return *o.OS +} + +// WithPolicy set field Policy to given value +func (o *PullOptions) WithPolicy(value string) *PullOptions { + o.Policy = &value + return o +} + +// GetPolicy 
returns value of field Policy +func (o *PullOptions) GetPolicy() string { + if o.Policy == nil { + var z string + return z + } + return *o.Policy +} + +// WithPassword set field Password to given value +func (o *PullOptions) WithPassword(value string) *PullOptions { + o.Password = &value + return o +} + +// GetPassword returns value of field Password +func (o *PullOptions) GetPassword() string { + if o.Password == nil { + var z string + return z + } + return *o.Password +} + +// WithQuiet set field Quiet to given value +func (o *PullOptions) WithQuiet(value bool) *PullOptions { + o.Quiet = &value + return o +} + +// GetQuiet returns value of field Quiet +func (o *PullOptions) GetQuiet() bool { + if o.Quiet == nil { + var z bool + return z + } + return *o.Quiet +} + +// WithSkipTLSVerify set field SkipTLSVerify to given value +func (o *PullOptions) WithSkipTLSVerify(value bool) *PullOptions { + o.SkipTLSVerify = &value + return o +} + +// GetSkipTLSVerify returns value of field SkipTLSVerify +func (o *PullOptions) GetSkipTLSVerify() bool { + if o.SkipTLSVerify == nil { + var z bool + return z + } + return *o.SkipTLSVerify +} + +// WithUsername set field Username to given value +func (o *PullOptions) WithUsername(value string) *PullOptions { + o.Username = &value + return o +} + +// GetUsername returns value of field Username +func (o *PullOptions) GetUsername() string { + if o.Username == nil { + var z string + return z + } + return *o.Username +} + +// WithVariant set field Variant to given value +func (o *PullOptions) WithVariant(value string) *PullOptions { + o.Variant = &value + return o +} + +// GetVariant returns value of field Variant +func (o *PullOptions) GetVariant() string { + if o.Variant == nil { + var z string + return z + } + return *o.Variant +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go new file mode 100644 index 00000000000..4985c945123 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go @@ -0,0 +1,123 @@ +// Code generated by go generate; DO NOT EDIT. 
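The accessors above make cross-platform pulls a one-liner: Arch, OS and Variant override the local platform, while Policy controls whether a pull happens at all. A sketch with a placeholder image:

func crossPullDemo(ctx context.Context) error {
    ids, err := images.Pull(ctx, "quay.io/libpod/alpine:latest", // placeholder
        new(images.PullOptions).
            WithArch("arm64").
            WithOS("linux").
            WithPolicy("missing")) // pull only when absent locally
    if err != nil {
        return err
    }
    fmt.Println(ids)
    return nil
}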
+package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *PushOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *PushOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithAll set field All to given value +func (o *PushOptions) WithAll(value bool) *PushOptions { + o.All = &value + return o +} + +// GetAll returns value of field All +func (o *PushOptions) GetAll() bool { + if o.All == nil { + var z bool + return z + } + return *o.All +} + +// WithAuthfile set field Authfile to given value +func (o *PushOptions) WithAuthfile(value string) *PushOptions { + o.Authfile = &value + return o +} + +// GetAuthfile returns value of field Authfile +func (o *PushOptions) GetAuthfile() string { + if o.Authfile == nil { + var z string + return z + } + return *o.Authfile +} + +// WithCompress set field Compress to given value +func (o *PushOptions) WithCompress(value bool) *PushOptions { + o.Compress = &value + return o +} + +// GetCompress returns value of field Compress +func (o *PushOptions) GetCompress() bool { + if o.Compress == nil { + var z bool + return z + } + return *o.Compress +} + +// WithFormat set field Format to given value +func (o *PushOptions) WithFormat(value string) *PushOptions { + o.Format = &value + return o +} + +// GetFormat returns value of field Format +func (o *PushOptions) GetFormat() string { + if o.Format == nil { + var z string + return z + } + return *o.Format +} + +// WithPassword set field Password to given value +func (o *PushOptions) WithPassword(value string) *PushOptions { + o.Password = &value + return o +} + +// GetPassword returns value of field Password +func (o *PushOptions) GetPassword() string { + if o.Password == nil { + var z string + return z + } + return *o.Password +} + +// WithSkipTLSVerify set field SkipTLSVerify to given value +func (o *PushOptions) WithSkipTLSVerify(value bool) *PushOptions { + o.SkipTLSVerify = &value + return o +} + +// GetSkipTLSVerify returns value of field SkipTLSVerify +func (o *PushOptions) GetSkipTLSVerify() bool { + if o.SkipTLSVerify == nil { + var z bool + return z + } + return *o.SkipTLSVerify +} + +// WithUsername set field Username to given value +func (o *PushOptions) WithUsername(value string) *PushOptions { + o.Username = &value + return o +} + +// GetUsername returns value of field Username +func (o *PushOptions) GetUsername() string { + if o.Username == nil { + var z string + return z + } + return *o.Username +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_remove_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_remove_options.go new file mode 100644 index 00000000000..613a33183de --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_remove_options.go @@ -0,0 +1,63 @@ +// Code generated by go generate; DO NOT EDIT. 
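Combined with the Push binding near the top of this section, the PushOptions accessors cover the common push-with-credentials flow. Registry, credentials and image names below are placeholders:

func pushDemo(ctx context.Context) error {
    return images.Push(ctx,
        "localhost/myapp:latest",             // source in local storage
        "registry.example.com/team/myapp:v1", // destination reference
        new(images.PushOptions).
            WithUsername("builder").
            WithPassword("s3cr3t").
            WithSkipTLSVerify(true)) // arrives on the wire as tlsVerify=false
}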
+package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *RemoveOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *RemoveOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithAll set field All to given value +func (o *RemoveOptions) WithAll(value bool) *RemoveOptions { + o.All = &value + return o +} + +// GetAll returns value of field All +func (o *RemoveOptions) GetAll() bool { + if o.All == nil { + var z bool + return z + } + return *o.All +} + +// WithForce set field Force to given value +func (o *RemoveOptions) WithForce(value bool) *RemoveOptions { + o.Force = &value + return o +} + +// GetForce returns value of field Force +func (o *RemoveOptions) GetForce() bool { + if o.Force == nil { + var z bool + return z + } + return *o.Force +} + +// WithIgnore set field Ignore to given value +func (o *RemoveOptions) WithIgnore(value bool) *RemoveOptions { + o.Ignore = &value + return o +} + +// GetIgnore returns value of field Ignore +func (o *RemoveOptions) GetIgnore() bool { + if o.Ignore == nil { + var z bool + return z + } + return *o.Ignore +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_search_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_search_options.go new file mode 100644 index 00000000000..63de15c5982 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_search_options.go @@ -0,0 +1,93 @@ +// Code generated by go generate; DO NOT EDIT. +package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *SearchOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *SearchOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithAuthfile set field Authfile to given value +func (o *SearchOptions) WithAuthfile(value string) *SearchOptions { + o.Authfile = &value + return o +} + +// GetAuthfile returns value of field Authfile +func (o *SearchOptions) GetAuthfile() string { + if o.Authfile == nil { + var z string + return z + } + return *o.Authfile +} + +// WithFilters set field Filters to given value +func (o *SearchOptions) WithFilters(value map[string][]string) *SearchOptions { + o.Filters = value + return o +} + +// GetFilters returns value of field Filters +func (o *SearchOptions) GetFilters() map[string][]string { + if o.Filters == nil { + var z map[string][]string + return z + } + return o.Filters +} + +// WithLimit set field Limit to given value +func (o *SearchOptions) WithLimit(value int) *SearchOptions { + o.Limit = &value + return o +} + +// GetLimit returns value of field Limit +func (o *SearchOptions) GetLimit() int { + if o.Limit == nil { + var z int + return z + } + return *o.Limit +} + +// WithSkipTLSVerify set field SkipTLSVerify to given value +func (o *SearchOptions) WithSkipTLSVerify(value bool) *SearchOptions { + o.SkipTLSVerify = &value + return o +} + +// GetSkipTLSVerify returns value of field SkipTLSVerify +func (o *SearchOptions) GetSkipTLSVerify() bool { + if o.SkipTLSVerify == nil { + var z bool + return z + } + return *o.SkipTLSVerify +} + +// WithListTags set field ListTags to given value +func (o 
*SearchOptions) WithListTags(value bool) *SearchOptions { + o.ListTags = &value + return o +} + +// GetListTags returns value of field ListTags +func (o *SearchOptions) GetListTags() bool { + if o.ListTags == nil { + var z bool + return z + } + return *o.ListTags +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_tag_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_tag_options.go new file mode 100644 index 00000000000..30cef4dd2c3 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_tag_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. +package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *TagOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *TagOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_tree_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_tree_options.go new file mode 100644 index 00000000000..57eecf959a2 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_tree_options.go @@ -0,0 +1,33 @@ +// Code generated by go generate; DO NOT EDIT. +package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *TreeOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *TreeOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithWhatRequires set field WhatRequires to given value +func (o *TreeOptions) WithWhatRequires(value bool) *TreeOptions { + o.WhatRequires = &value + return o +} + +// GetWhatRequires returns value of field WhatRequires +func (o *TreeOptions) GetWhatRequires() bool { + if o.WhatRequires == nil { + var z bool + return z + } + return *o.WhatRequires +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_untag_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_untag_options.go new file mode 100644 index 00000000000..eba687f3d2c --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_untag_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. 
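A matching sketch for the Search binding shown earlier; the term and option values are illustrative:

func searchDemo(ctx context.Context) error {
    results, err := images.Search(ctx, "alpine",
        new(images.SearchOptions).
            WithLimit(5).
            WithListTags(true)) // return tags rather than descriptions
    if err != nil {
        return err
    }
    for _, r := range results {
        fmt.Println(r.Name)
    }
    return nil
}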
+package images + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *UntagOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *UntagOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/internal/util/util.go b/vendor/github.com/containers/podman/v4/pkg/bindings/internal/util/util.go new file mode 100644 index 00000000000..f8f99d6c188 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/internal/util/util.go @@ -0,0 +1,114 @@ +package util + +import ( + "errors" + "fmt" + "net/url" + "reflect" + "strconv" + "strings" + + jsoniter "github.com/json-iterator/go" +) + +func IsSimpleType(f reflect.Value) bool { + if _, ok := f.Interface().(fmt.Stringer); ok { + return true + } + + switch f.Kind() { + case reflect.Bool, reflect.Int, reflect.Int64, reflect.Uint, reflect.Uint64, reflect.String: + return true + } + + return false +} + +func SimpleTypeToParam(f reflect.Value) string { + if s, ok := f.Interface().(fmt.Stringer); ok { + return s.String() + } + + switch f.Kind() { + case reflect.Bool: + return strconv.FormatBool(f.Bool()) + case reflect.Int, reflect.Int64: + // f.Int() is always an int64 + return strconv.FormatInt(f.Int(), 10) + case reflect.Uint, reflect.Uint64: + // f.Uint() is always an uint64 + return strconv.FormatUint(f.Uint(), 10) + case reflect.String: + return f.String() + } + + panic("the input parameter is not a simple type") +} + +func Changed(o interface{}, fieldName string) bool { + r := reflect.ValueOf(o) + value := reflect.Indirect(r).FieldByName(fieldName) + return !value.IsNil() +} + +func ToParams(o interface{}) (url.Values, error) { + params := url.Values{} + if o == nil || reflect.ValueOf(o).IsNil() { + return params, nil + } + json := jsoniter.ConfigCompatibleWithStandardLibrary + s := reflect.ValueOf(o) + if reflect.Ptr == s.Kind() { + s = s.Elem() + } + sType := s.Type() + for i := 0; i < s.NumField(); i++ { + fieldName := sType.Field(i).Name + if !Changed(o, fieldName) { + continue + } + fieldName = strings.ToLower(fieldName) + f := s.Field(i) + if reflect.Ptr == f.Kind() { + f = f.Elem() + } + paramName := fieldName + if pn, ok := sType.Field(i).Tag.Lookup("schema"); ok { + paramName = pn + } + switch { + case IsSimpleType(f): + params.Set(paramName, SimpleTypeToParam(f)) + case f.Kind() == reflect.Slice: + for i := 0; i < f.Len(); i++ { + elem := f.Index(i) + if IsSimpleType(elem) { + params.Add(paramName, SimpleTypeToParam(elem)) + } else { + return nil, errors.New("slices must contain only simple types") + } + } + case f.Kind() == reflect.Map: + lowerCaseKeys := make(map[string]interface{}) + iter := f.MapRange() + for iter.Next() { + lowerCaseKeys[iter.Key().Interface().(string)] = iter.Value().Interface() + } + s, err := json.MarshalToString(lowerCaseKeys) + if err != nil { + return nil, err + } + + params.Set(paramName, s) + } + } + return params, nil +} + +func MapToArrayString(data map[string]string) []string { + l := make([]string, 0) + for k, v := range data { + l = append(l, k+"="+v) + } + return l +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/secrets.go b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/secrets.go new file mode 100644 index 00000000000..7ad827dd339 --- /dev/null +++ 
b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/secrets.go @@ -0,0 +1,90 @@ +package secrets + +import ( + "context" + "io" + "net/http" + + "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v4/pkg/domain/entities" +) + +// List returns information about existing secrets in the form of a slice. +func List(ctx context.Context, options *ListOptions) ([]*entities.SecretInfoReport, error) { + var ( + secrs []*entities.SecretInfoReport + ) + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + params, err := options.ToParams() + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/secrets/json", params, nil) + if err != nil { + return secrs, err + } + defer response.Body.Close() + + return secrs, response.Process(&secrs) +} + +// Inspect returns low-level information about a secret. +func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*entities.SecretInfoReport, error) { + var ( + inspect *entities.SecretInfoReport + ) + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/secrets/%s/json", nil, nil, nameOrID) + if err != nil { + return inspect, err + } + defer response.Body.Close() + + return inspect, response.Process(&inspect) +} + +// Remove removes a secret from storage +func Remove(ctx context.Context, nameOrID string) error { + conn, err := bindings.GetClient(ctx) + if err != nil { + return err + } + + response, err := conn.DoRequest(ctx, nil, http.MethodDelete, "/secrets/%s", nil, nil, nameOrID) + if err != nil { + return err + } + defer response.Body.Close() + + return response.Process(nil) +} + +// Create creates a secret given some data +func Create(ctx context.Context, reader io.Reader, options *CreateOptions) (*entities.SecretCreateReport, error) { + var ( + create *entities.SecretCreateReport + ) + conn, err := bindings.GetClient(ctx) + if err != nil { + return nil, err + } + + params, err := options.ToParams() + if err != nil { + return nil, err + } + + response, err := conn.DoRequest(ctx, reader, http.MethodPost, "/secrets/create", params, nil) + if err != nil { + return nil, err + } + defer response.Body.Close() + + return create, response.Process(&create) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types.go b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types.go new file mode 100644 index 00000000000..01c3c248d1d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types.go @@ -0,0 +1,25 @@ +package secrets + +//go:generate go run ../generator/generator.go ListOptions +// ListOptions are optional options for inspecting secrets +type ListOptions struct { + Filters map[string][]string +} + +//go:generate go run ../generator/generator.go InspectOptions +// InspectOptions are optional options for inspecting secrets +type InspectOptions struct { +} + +//go:generate go run ../generator/generator.go RemoveOptions +// RemoveOptions are optional options for removing secrets +type RemoveOptions struct { +} + +//go:generate go run ../generator/generator.go CreateOptions +// CreateOptions are optional options for Creating secrets +type CreateOptions struct { + Name *string + Driver *string + DriverOpts map[string]string +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go 
b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go new file mode 100644 index 00000000000..6b1666a427b --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go @@ -0,0 +1,63 @@ +// Code generated by go generate; DO NOT EDIT. +package secrets + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *CreateOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *CreateOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithName set field Name to given value +func (o *CreateOptions) WithName(value string) *CreateOptions { + o.Name = &value + return o +} + +// GetName returns value of field Name +func (o *CreateOptions) GetName() string { + if o.Name == nil { + var z string + return z + } + return *o.Name +} + +// WithDriver set field Driver to given value +func (o *CreateOptions) WithDriver(value string) *CreateOptions { + o.Driver = &value + return o +} + +// GetDriver returns value of field Driver +func (o *CreateOptions) GetDriver() string { + if o.Driver == nil { + var z string + return z + } + return *o.Driver +} + +// WithDriverOpts set field DriverOpts to given value +func (o *CreateOptions) WithDriverOpts(value map[string]string) *CreateOptions { + o.DriverOpts = value + return o +} + +// GetDriverOpts returns value of field DriverOpts +func (o *CreateOptions) GetDriverOpts() map[string]string { + if o.DriverOpts == nil { + var z map[string]string + return z + } + return o.DriverOpts +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_inspect_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_inspect_options.go new file mode 100644 index 00000000000..fe26ae3b86a --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_inspect_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. +package secrets + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *InspectOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *InspectOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_list_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_list_options.go new file mode 100644 index 00000000000..97351e16d06 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_list_options.go @@ -0,0 +1,33 @@ +// Code generated by go generate; DO NOT EDIT. 
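Create in the secrets package, shown earlier in this diff, streams the secret payload as the request body and names it via these generated options. The secret name and value are placeholders, and the report's ID field is assumed from entities.SecretCreateReport:

func secretDemo(ctx context.Context) error {
    report, err := secrets.Create(ctx, strings.NewReader("hunter2"), // payload
        new(secrets.CreateOptions).WithName("db-password"))
    if err != nil {
        return err
    }
    fmt.Println(report.ID) // server-assigned identifier (assumed field)
    return nil
}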
+package secrets + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *ListOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *ListOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithFilters set field Filters to given value +func (o *ListOptions) WithFilters(value map[string][]string) *ListOptions { + o.Filters = value + return o +} + +// GetFilters returns value of field Filters +func (o *ListOptions) GetFilters() map[string][]string { + if o.Filters == nil { + var z map[string][]string + return z + } + return o.Filters +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_remove_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_remove_options.go new file mode 100644 index 00000000000..f8a060fdda9 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_remove_options.go @@ -0,0 +1,18 @@ +// Code generated by go generate; DO NOT EDIT. +package secrets + +import ( + "net/url" + + "github.com/containers/podman/v4/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *RemoveOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *RemoveOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/copy/fileinfo.go b/vendor/github.com/containers/podman/v4/pkg/copy/fileinfo.go new file mode 100644 index 00000000000..0ccca5b6ee0 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/copy/fileinfo.go @@ -0,0 +1,104 @@ +package copy + +import ( + "encoding/base64" + "encoding/json" + "net/http" + "os" + "path/filepath" + "strings" + + "github.com/containers/podman/v4/libpod/define" + "github.com/pkg/errors" +) + +// XDockerContainerPathStatHeader is the *key* in http headers pointing to the +// base64 encoded JSON payload of stating a path in a container. +const XDockerContainerPathStatHeader = "X-Docker-Container-Path-Stat" + +// ErrENOENT mimics the stdlib's ErrENOENT and can be used to implement custom logic +// while preserving the user-visible error message. +var ErrENOENT = errors.New("No such file or directory") + +// FileInfo describes a file or directory and is returned by +// (*CopyItem).Stat(). +type FileInfo = define.FileInfo + +// EncodeFileInfo serializes the specified FileInfo as a base64 encoded JSON +// payload. Intended for Docker compat. +func EncodeFileInfo(info *FileInfo) (string, error) { + buf, err := json.Marshal(&info) + if err != nil { + return "", errors.Wrap(err, "failed to serialize file stats") + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +// ExtractFileInfoFromHeader extracts a base64 encoded JSON payload of a +// FileInfo in the http header. If no such header entry is found, nil is +// returned. Intended for Docker compat. 
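EncodeFileInfo and the extraction helper defined just below are inverses over the X-Docker-Container-Path-Stat header; a round-trip sketch with illustrative field values (the copy import is this package):

func statHeaderDemo() error {
    info := &copy.FileInfo{Name: "etc", IsDir: true} // illustrative values
    enc, err := copy.EncodeFileInfo(info)
    if err != nil {
        return err
    }
    hdr := http.Header{}
    hdr.Set(copy.XDockerContainerPathStatHeader, enc)
    back, err := copy.ExtractFileInfoFromHeader(&hdr)
    if err != nil {
        return err
    }
    fmt.Println(back.Name, back.IsDir) // "etc true"
    return nil
}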
+func ExtractFileInfoFromHeader(header *http.Header) (*FileInfo, error) { + rawData := header.Get(XDockerContainerPathStatHeader) + if len(rawData) == 0 { + return nil, nil + } + + info := FileInfo{} + base64Decoder := base64.NewDecoder(base64.URLEncoding, strings.NewReader(rawData)) + if err := json.NewDecoder(base64Decoder).Decode(&info); err != nil { + return nil, err + } + + return &info, nil +} + +// ResolveHostPath resolves the specified, possibly relative, path on the host. +func ResolveHostPath(path string) (*FileInfo, error) { + resolvedHostPath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + resolvedHostPath = PreserveBasePath(path, resolvedHostPath) + + statInfo, err := os.Stat(resolvedHostPath) + if err != nil { + if os.IsNotExist(err) { + return nil, ErrENOENT + } + return nil, err + } + + return &FileInfo{ + Name: statInfo.Name(), + Size: statInfo.Size(), + Mode: statInfo.Mode(), + ModTime: statInfo.ModTime(), + IsDir: statInfo.IsDir(), + LinkTarget: resolvedHostPath, + }, nil +} + +// PreserveBasePath makes sure that the original base path (e.g., "/" or "./") +// is preserved. The filepath API among tends to clean up a bit too much but +// we *must* preserve this data by all means. +func PreserveBasePath(original, resolved string) string { + // Handle "/" + if strings.HasSuffix(original, "/") { + if !strings.HasSuffix(resolved, "/") { + resolved += "/" + } + return resolved + } + + // Handle "/." + if strings.HasSuffix(original, "/.") { + if strings.HasSuffix(resolved, "/") { // could be root! + resolved += "." + } else if !strings.HasSuffix(resolved, "/.") { + resolved += "/." + } + return resolved + } + + return resolved +} diff --git a/vendor/github.com/containers/podman/v4/pkg/copy/parse.go b/vendor/github.com/containers/podman/v4/pkg/copy/parse.go new file mode 100644 index 00000000000..93edec5fa6a --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/copy/parse.go @@ -0,0 +1,49 @@ +package copy + +import ( + "strings" + + "github.com/pkg/errors" +) + +// ParseSourceAndDestination parses the source and destination input into a +// possibly specified container and path. The input format is described in +// podman-cp(1) as "[nameOrID:]path". Colons in paths are supported as long +// they start with a dot or slash. +// +// It returns, in order, the source container and path, followed by the +// destination container and path, and an error. Note that exactly one +// container must be specified. +func ParseSourceAndDestination(source, destination string) (string, string, string, string, error) { + sourceContainer, sourcePath := parseUserInput(source) + destContainer, destPath := parseUserInput(destination) + + if len(sourcePath) == 0 || len(destPath) == 0 { + return "", "", "", "", errors.Errorf("invalid arguments %q, %q: you must specify paths", source, destination) + } + + return sourceContainer, sourcePath, destContainer, destPath, nil +} + +// parseUserInput parses the input string and returns, if specified, the name +// or ID of the container and the path. The input format is described in +// podman-cp(1) as "[nameOrID:]path". Colons in paths are supported as long +// they start with a dot or slash. +func parseUserInput(input string) (container string, path string) { + if len(input) == 0 { + return + } + path = input + + // If the input starts with a dot or slash, it cannot refer to a + // container. + if input[0] == '.' 
|| input[0] == '/' { + return + } + + if spl := strings.SplitN(path, ":", 2); len(spl) == 2 { + container = spl[0] + path = spl[1] + } + return +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/auto-update.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/auto-update.go new file mode 100644 index 00000000000..5ea2cdf150e --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/auto-update.go @@ -0,0 +1,31 @@ +package entities + +// AutoUpdateOptions are the options for running auto-update. +type AutoUpdateOptions struct { + // Authfile to use when contacting registries. + Authfile string + // Only check for but do not perform any update. If an update is + // pending, it will be indicated in the Updated field of + // AutoUpdateReport. + DryRun bool + // If restarting the service with the new image failed, restart it + // another time with the previous image. + Rollback bool +} + +// AutoUpdateReport contains the results from running auto-update. +type AutoUpdateReport struct { + // ID of the container *before* an update. + ContainerID string + // Name of the container *before* an update. + ContainerName string + // Name of the image. + ImageName string + // The configured auto-update policy. + Policy string + // SystemdUnit running a container configured for auto updates. + SystemdUnit string + // Indicates the update status: true, false, failed, pending (see + // DryRun). + Updated string +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go new file mode 100644 index 00000000000..a5562e7c9ab --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go @@ -0,0 +1,200 @@ +package entities + +import ( + "sort" + "strings" + "time" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/podman/v4/pkg/ps/define" + "github.com/pkg/errors" +) + +// ListContainer describes a container suitable for listing +type ListContainer struct { + // AutoRemove + AutoRemove bool + // Container command + Command []string + // Container creation time + Created time.Time + // Human-readable container creation time. + CreatedAt string + // If container has exited/stopped + Exited bool + // Time container exited + ExitedAt int64 + // If container has exited, the return code from the command + ExitCode int32 + // The unique identifier for the container + ID string `json:"Id"` + // Container image + Image string + // Container image ID + ImageID string + // If this container is a Pod infra container + IsInfra bool + // Labels for container + Labels map[string]string + // User volume mounts + Mounts []string + // The names assigned to the container + Names []string + // Namespaces the container belongs to. Requires the + // namespace boolean to be true + Namespaces ListContainerNamespaces + // The network names assigned to the container + Networks []string + // The process id of the container + Pid int + // If the container is part of Pod, the Pod ID. Requires the pod + // boolean to be set + Pod string + // If the container is part of Pod, the Pod name. Requires the pod + // boolean to be set + PodName string + // Port mappings + Ports []types.PortMapping + // Size of the container rootfs. 
Requires the size boolean to be true + Size *define.ContainerSize + // Time when container started + StartedAt int64 + // State of container + State string + // Status is a human-readable approximation of a duration for json output + Status string +} + +// ListContainerNamespaces contains the identifiers of the container's Linux namespaces +type ListContainerNamespaces struct { + // Mount namespace + MNT string `json:"Mnt,omitempty"` + // Cgroup namespace + Cgroup string `json:"Cgroup,omitempty"` + // IPC namespace + IPC string `json:"Ipc,omitempty"` + // Network namespace + NET string `json:"Net,omitempty"` + // PID namespace + PIDNS string `json:"Pidns,omitempty"` + // UTS namespace + UTS string `json:"Uts,omitempty"` + // User namespace + User string `json:"User,omitempty"` +} + +type SortListContainers []ListContainer + +func (a SortListContainers) Len() int { return len(a) } +func (a SortListContainers) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type psSortedCommand struct{ SortListContainers } + +func (a psSortedCommand) Less(i, j int) bool { + return strings.Join(a.SortListContainers[i].Command, " ") < strings.Join(a.SortListContainers[j].Command, " ") +} + +type psSortedID struct{ SortListContainers } + +func (a psSortedID) Less(i, j int) bool { + return a.SortListContainers[i].ID < a.SortListContainers[j].ID +} + +type psSortedImage struct{ SortListContainers } + +func (a psSortedImage) Less(i, j int) bool { + return a.SortListContainers[i].Image < a.SortListContainers[j].Image +} + +type psSortedNames struct{ SortListContainers } + +func (a psSortedNames) Less(i, j int) bool { + return a.SortListContainers[i].Names[0] < a.SortListContainers[j].Names[0] +} + +type psSortedPod struct{ SortListContainers } + +func (a psSortedPod) Less(i, j int) bool { + return a.SortListContainers[i].Pod < a.SortListContainers[j].Pod +} + +type psSortedRunningFor struct{ SortListContainers } + +func (a psSortedRunningFor) Less(i, j int) bool { + return a.SortListContainers[i].StartedAt < a.SortListContainers[j].StartedAt +} + +type psSortedStatus struct{ SortListContainers } + +func (a psSortedStatus) Less(i, j int) bool { + return a.SortListContainers[i].State < a.SortListContainers[j].State +} + +type psSortedSize struct{ SortListContainers } + +func (a psSortedSize) Less(i, j int) bool { + if a.SortListContainers[i].Size == nil || a.SortListContainers[j].Size == nil { + return false + } + return a.SortListContainers[i].Size.RootFsSize < a.SortListContainers[j].Size.RootFsSize +} + +type PsSortedCreateTime struct{ SortListContainers } + +func (a PsSortedCreateTime) Less(i, j int) bool { + return a.SortListContainers[i].Created.Before(a.SortListContainers[j].Created) +} + +func SortPsOutput(sortBy string, psOutput SortListContainers) (SortListContainers, error) { + switch sortBy { + case "id": + sort.Sort(psSortedID{psOutput}) + case "image": + sort.Sort(psSortedImage{psOutput}) + case "command": + sort.Sort(psSortedCommand{psOutput}) + case "runningfor": + sort.Sort(psSortedRunningFor{psOutput}) + case "status": + sort.Sort(psSortedStatus{psOutput}) + case "size": + sort.Sort(psSortedSize{psOutput}) + case "names": + sort.Sort(psSortedNames{psOutput}) + case "created": + sort.Sort(PsSortedCreateTime{psOutput}) + case "pod": + sort.Sort(psSortedPod{psOutput}) + default: + return nil, errors.Errorf("invalid option for --sort, options are: command, created, id, image, names, runningfor, size, or status") + } + return psOutput, nil +} + +func (l ListContainer) CGROUPNS() string { + return 
l.Namespaces.Cgroup
+}
+
+func (l ListContainer) IPC() string {
+ return l.Namespaces.IPC
+}
+
+func (l ListContainer) MNT() string {
+ return l.Namespaces.MNT
+}
+
+func (l ListContainer) NET() string {
+ return l.Namespaces.NET
+}
+
+func (l ListContainer) PIDNS() string {
+ return l.Namespaces.PIDNS
+}
+
+func (l ListContainer) USERNS() string {
+ return l.Namespaces.User
+}
+
+func (l ListContainer) UTS() string {
+ return l.Namespaces.UTS
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go
new file mode 100644
index 00000000000..1db8b9951d4
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go
@@ -0,0 +1,481 @@
+package entities
+
+import (
+ "io"
+ "net/url"
+ "os"
+ "time"
+
+ nettypes "github.com/containers/common/libnetwork/types"
+ "github.com/containers/image/v5/types"
+ "github.com/containers/podman/v4/libpod/define"
+ "github.com/containers/podman/v4/pkg/specgen"
+ "github.com/containers/storage/pkg/archive"
+)
+
+// ContainerRunlabelOptions are the options to execute container-runlabel.
+type ContainerRunlabelOptions struct {
+ // Authfile - path to an authentication file.
+ Authfile string
+ // CertDir - path to a directory containing TLS certifications and
+ // keys.
+ CertDir string
+ // Credentials - `user:password` to use when pulling an image.
+ Credentials string
+ // Display - do not execute but print the command.
+ Display bool
+ // Replace - replace an existing container with a new one from the
+ // image.
+ Replace bool
+ // Name - use this name when executing the runlabel container.
+ Name string
+ // Optional1 - first optional parameter for install.
+ Optional1 string
+ // Optional2 - second optional parameter for install.
+ Optional2 string
+ // Optional3 - third optional parameter for install.
+ Optional3 string
+ // Pull - pull the specified image if it's not in the local storage.
+ Pull bool
+ // Quiet - suppress output when pulling images.
+ Quiet bool
+ // SignaturePolicy - path to a signature-policy file.
+ SignaturePolicy string
+ // SkipTLSVerify - skip HTTPS and certificate verifications when
+ // contacting registries.
+ SkipTLSVerify types.OptionalBool
+}
+
+// ContainerRunlabelReport contains the results from executing container-runlabel.
+type ContainerRunlabelReport struct {
+}
+
+type WaitOptions struct {
+ Condition []define.ContainerStatus
+ Interval time.Duration
+ Latest bool
+}
+
+type WaitReport struct {
+ Id string //nolint
+ Error error
+ ExitCode int32
+}
+
+type BoolReport struct {
+ Value bool
+}
+
+// StringSliceReport wraps a string slice.
+type StringSliceReport struct {
+ Value []string
+}
+
+type PauseUnPauseOptions struct {
+ All bool
+}
+
+type PauseUnpauseReport struct {
+ Err error
+ Id string //nolint
+}
+
+type StopOptions struct {
+ All bool
+ Ignore bool
+ Latest bool
+ Timeout *uint
+}
+
+type StopReport struct {
+ Err error
+ Id string //nolint
+ RawInput string
+}
+
+type TopOptions struct {
+ // CLI flags.
+ ListDescriptors bool
+ Latest bool
+
+ // Options for the API.
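+ // Illustrative values (an assumption, not upstream documentation): the
+ // descriptors are the ps(1)-style keywords understood by
+ // github.com/containers/psgo, so a caller might set, for example:
+ //
+ //	opts := entities.TopOptions{
+ //		NameOrID:    "web",
+ //		Descriptors: []string{"user", "pid", "comm"},
+ //	}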
+ Descriptors []string
+ NameOrID string
+}
+
+type KillOptions struct {
+ All bool
+ Latest bool
+ Signal string
+}
+
+type KillReport struct {
+ Err error
+ Id string //nolint
+ RawInput string
+}
+
+type RestartOptions struct {
+ All bool
+ Latest bool
+ Running bool
+ Timeout *uint
+}
+
+type RestartReport struct {
+ Err error
+ Id string //nolint
+}
+
+type RmOptions struct {
+ All bool
+ Depend bool
+ Force bool
+ Ignore bool
+ Latest bool
+ Timeout *uint
+ Volumes bool
+}
+
+type ContainerInspectReport struct {
+ *define.InspectContainerData
+}
+
+type ContainerStatReport struct {
+ define.FileInfo
+}
+
+type CommitOptions struct {
+ Author string
+ Changes []string
+ Format string
+ ImageName string
+ IncludeVolumes bool
+ Message string
+ Pause bool
+ Quiet bool
+ Squash bool
+ Writer io.Writer
+}
+
+type CopyOptions struct {
+ // If used with ContainerCopyFromArchive and set to true
+ // it will change ownership of files from the source tar archive
+ // to the primary uid/gid of the destination container.
+ Chown bool
+ // Map to translate path names.
+ Rename map[string]string
+}
+
+type CommitReport struct {
+ Id string //nolint
+}
+
+type ContainerExportOptions struct {
+ Output string
+}
+
+type CheckpointOptions struct {
+ All bool
+ Export string
+ CreateImage string
+ IgnoreRootFS bool
+ IgnoreVolumes bool
+ Keep bool
+ Latest bool
+ LeaveRunning bool
+ TCPEstablished bool
+ PreCheckPoint bool
+ WithPrevious bool
+ Compression archive.Compression
+ PrintStats bool
+ FileLocks bool
+}
+
+type CheckpointReport struct {
+ Err error `json:"-"`
+ Id string `json:"Id"` //nolint
+ RuntimeDuration int64 `json:"runtime_checkpoint_duration"`
+ CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"`
+}
+
+type RestoreOptions struct {
+ All bool
+ IgnoreRootFS bool
+ IgnoreVolumes bool
+ IgnoreStaticIP bool
+ IgnoreStaticMAC bool
+ Import string
+ CheckpointImage bool
+ Keep bool
+ Latest bool
+ Name string
+ TCPEstablished bool
+ ImportPrevious string
+ PublishPorts []string
+ Pod string
+ PrintStats bool
+ FileLocks bool
+}
+
+type RestoreReport struct {
+ Err error `json:"-"`
+ Id string `json:"Id"` //nolint
+ RuntimeDuration int64 `json:"runtime_restore_duration"`
+ CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"`
+}
+
+type ContainerCreateReport struct {
+ Id string //nolint
+}
+
+// AttachOptions describes the cli and other values
+// needed to perform an attach
+type AttachOptions struct {
+ DetachKeys string
+ Latest bool
+ NoStdin bool
+ SigProxy bool
+ Stdin *os.File
+ Stdout *os.File
+ Stderr *os.File
+}
+
+// ContainerLogsOptions describes the options to extract container logs.
+type ContainerLogsOptions struct {
+ // Show extra details provided to the logs.
+ Details bool
+ // Follow the log output.
+ Follow bool
+ // Display logs for the latest container only. Ignored on the remote client.
+ Latest bool
+ // Show container names in the output.
+ Names bool
+ // Show logs since this timestamp.
+ Since time.Time
+ // Show logs until this timestamp.
+ Until time.Time
+ // Number of lines to display at the end of the output.
+ Tail int64
+ // Show timestamps in the logs.
+ Timestamps bool
+ // Show different colors in the logs.
+ Colors bool
+ // Write the stdout to this Writer.
+ StdoutWriter io.Writer
+ // Write the stderr to this Writer.
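+ // A minimal sketch (an assumption, not upstream documentation): wiring the
+ // process streams through reproduces `podman logs --follow` behaviour:
+ //
+ //	opts := entities.ContainerLogsOptions{
+ //		Follow:       true,
+ //		StdoutWriter: os.Stdout,
+ //		StderrWriter: os.Stderr,
+ //	}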
+ StderrWriter io.Writer +} + +// ExecOptions describes the cli values to exec into +// a container +type ExecOptions struct { + Cmd []string + DetachKeys string + Envs map[string]string + Interactive bool + Latest bool + PreserveFDs uint + Privileged bool + Tty bool + User string + WorkDir string +} + +// ContainerExistsOptions describes the cli values to check if a container exists +type ContainerExistsOptions struct { + External bool +} + +// ContainerStartOptions describes the val from the +// CLI needed to start a container +type ContainerStartOptions struct { + Filters map[string][]string + All bool + Attach bool + DetachKeys string + Interactive bool + Latest bool + SigProxy bool + Stdout *os.File + Stderr *os.File + Stdin *os.File +} + +// ContainerStartReport describes the response from starting +// containers from the cli +type ContainerStartReport struct { + Id string //nolint + RawInput string + Err error + ExitCode int +} + +// ContainerListOptions describes the CLI options +// for listing containers +type ContainerListOptions struct { + All bool + Filters map[string][]string + Format string + Last int + Latest bool + Namespace bool + Pod bool + Quiet bool + Size bool + External bool + Sort string + Sync bool + Watch uint +} + +// ContainerRunOptions describes the options needed +// to run a container from the CLI +type ContainerRunOptions struct { + CIDFile string + Detach bool + DetachKeys string + ErrorStream *os.File + InputStream *os.File + OutputStream *os.File + PreserveFDs uint + Rm bool + SigProxy bool + Spec *specgen.SpecGenerator + Passwd bool +} + +// ContainerRunReport describes the results of running +// a container +type ContainerRunReport struct { + ExitCode int + Id string //nolint +} + +// ContainerCleanupOptions are the CLI values for the +// cleanup command +type ContainerCleanupOptions struct { + All bool + Exec string + Latest bool + Remove bool + RemoveImage bool +} + +// ContainerCleanupReport describes the response from a +// container cleanup +type ContainerCleanupReport struct { + CleanErr error + Id string //nolint + RmErr error + RmiErr error +} + +// ContainerInitOptions describes input options +// for the container init cli +type ContainerInitOptions struct { + All bool + Latest bool +} + +// ContainerInitReport describes the results of a +// container init +type ContainerInitReport struct { + Err error + Id string //nolint +} + +// ContainerMountOptions describes the input values for mounting containers +// in the CLI +type ContainerMountOptions struct { + All bool + Format string + Latest bool + NoTruncate bool +} + +// ContainerUnmountOptions are the options from the cli for unmounting +type ContainerUnmountOptions struct { + All bool + Force bool + Latest bool +} + +// ContainerMountReport describes the response from container mount +type ContainerMountReport struct { + Err error + Id string //nolint + Name string + Path string +} + +// ContainerUnmountReport describes the response from umounting a container +type ContainerUnmountReport struct { + Err error + Id string //nolint +} + +// ContainerPruneOptions describes the options needed +// to prune a container from the CLI +type ContainerPruneOptions struct { + Filters url.Values `json:"filters" schema:"filters"` +} + +// ContainerPortOptions describes the options to obtain +// port information on containers +type ContainerPortOptions struct { + All bool + Latest bool +} + +// ContainerPortReport describes the output needed for +// the CLI to output ports +type ContainerPortReport struct { + 
Id string //nolint
+ Ports []nettypes.PortMapping
+}
+
+// ContainerCpOptions describes input options for cp.
+type ContainerCpOptions struct {
+ // Pause the container while copying.
+ Pause bool
+ // Extract the tarfile into the destination directory.
+ Extract bool
+}
+
+// ContainerStatsOptions describes input options for getting
+// stats on containers
+type ContainerStatsOptions struct {
+ // Operate on the latest known container. Only supported for local
+ // clients.
+ Latest bool
+ // Stream stats.
+ Stream bool
+ // Interval in seconds
+ Interval int
+}
+
+// ContainerStatsReport is used for streaming container stats.
+type ContainerStatsReport struct {
+ // Error from reading stats.
+ Error error
+ // Results, set when there is no error.
+ Stats []define.ContainerStats
+}
+
+// ContainerRenameOptions describes input options for renaming a container.
+type ContainerRenameOptions struct {
+ // NewName is the new name that will be given to the container.
+ NewName string
+}
+
+// ContainerCloneOptions contains options for cloning an existing container
+type ContainerCloneOptions struct {
+ ID string
+ Destroy bool
+ CreateOpts ContainerCreateOptions
+ Image string
+ RawImageName string
+ Run bool
+ Force bool
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go
new file mode 100644
index 00000000000..32faa74afd8
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go
@@ -0,0 +1,55 @@
+package entities
+
+import (
+ "github.com/containers/common/pkg/config"
+ "github.com/spf13/pflag"
+)
+
+// EngineMode is the connection type podman is using to access libpod
+type EngineMode string
+
+// EngineSetup calls out whether a "normal" or specialized engine should be created
+type EngineSetup string
+
+const (
+ ABIMode = EngineMode("abi")
+ TunnelMode = EngineMode("tunnel")
+
+ MigrateMode = EngineSetup("migrate")
+ NoFDsMode = EngineSetup("disablefds")
+ NormalMode = EngineSetup("normal")
+ RenumberMode = EngineSetup("renumber")
+ ResetMode = EngineSetup("reset")
+)
+
+// Convert EngineMode to String
+func (m EngineMode) String() string {
+ return string(m)
+}
+
+// PodmanConfig combines the defaults and settings from the file system with the
+// flags given in os.Args. Some runtime state is also stored here.
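+// A hedged construction sketch (illustrative, not upstream documentation; the
+// embedded *config.Config and *pflag.FlagSet are addressed by type name):
+//
+//	defaultCfg, _ := config.Default()
+//	podmanCfg := &entities.PodmanConfig{
+//		Config:     defaultCfg,
+//		FlagSet:    flags, // a parsed *pflag.FlagSet
+//		EngineMode: entities.ABIMode,
+//	}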
+type PodmanConfig struct { + *config.Config + *pflag.FlagSet + + CgroupUsage string // rootless code determines Usage message + ConmonPath string // --conmon flag will set Engine.ConmonPath + CPUProfile string // Hidden: Should CPU profile be taken + EngineMode EngineMode // ABI or Tunneling mode + Identity string // ssh identity for connecting to server + MaxWorks int // maximum number of parallel threads + MemoryProfile string // Hidden: Should memory profile be taken + NoOut bool // Don't output to stdout + RegistriesConf string // allows for specifying a custom registries.conf + Remote bool // Connection to Podman API Service will use RESTful API + RuntimePath string // --runtime flag will set Engine.RuntimePath + RuntimeFlags []string // global flags for the container runtime + Syslog bool // write to StdOut and Syslog, not supported when tunneling + Trace bool // Hidden: Trace execution + URI string // URI to RESTful API Service + + Runroot string + StorageDriver string + StorageOpts []string +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go new file mode 100644 index 00000000000..6b70a34524a --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go @@ -0,0 +1,106 @@ +package entities + +import ( + "context" + "io" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/domain/entities/reports" + "github.com/containers/podman/v4/pkg/specgen" +) + +type ContainerCopyFunc func() error + +type ContainerEngine interface { + AutoUpdate(ctx context.Context, options AutoUpdateOptions) ([]*AutoUpdateReport, []error) + Config(ctx context.Context) (*config.Config, error) + ContainerAttach(ctx context.Context, nameOrID string, options AttachOptions) error + ContainerCheckpoint(ctx context.Context, namesOrIds []string, options CheckpointOptions) ([]*CheckpointReport, error) + ContainerCleanup(ctx context.Context, namesOrIds []string, options ContainerCleanupOptions) ([]*ContainerCleanupReport, error) + ContainerClone(ctx context.Context, ctrClone ContainerCloneOptions) (*ContainerCreateReport, error) + ContainerCommit(ctx context.Context, nameOrID string, options CommitOptions) (*CommitReport, error) + ContainerCopyFromArchive(ctx context.Context, nameOrID, path string, reader io.Reader, options CopyOptions) (ContainerCopyFunc, error) + ContainerCopyToArchive(ctx context.Context, nameOrID string, path string, writer io.Writer) (ContainerCopyFunc, error) + ContainerCreate(ctx context.Context, s *specgen.SpecGenerator) (*ContainerCreateReport, error) + ContainerExec(ctx context.Context, nameOrID string, options ExecOptions, streams define.AttachStreams) (int, error) + ContainerExecDetached(ctx context.Context, nameOrID string, options ExecOptions) (string, error) + ContainerExists(ctx context.Context, nameOrID string, options ContainerExistsOptions) (*BoolReport, error) + ContainerExport(ctx context.Context, nameOrID string, options ContainerExportOptions) error + ContainerInit(ctx context.Context, namesOrIds []string, options ContainerInitOptions) ([]*ContainerInitReport, error) + ContainerInspect(ctx context.Context, namesOrIds []string, options InspectOptions) ([]*ContainerInspectReport, []error, error) + ContainerKill(ctx context.Context, namesOrIds []string, options KillOptions) ([]*KillReport, error) 
+ ContainerList(ctx context.Context, options ContainerListOptions) ([]ListContainer, error) + ContainerListExternal(ctx context.Context) ([]ListContainer, error) + ContainerLogs(ctx context.Context, containers []string, options ContainerLogsOptions) error + ContainerMount(ctx context.Context, nameOrIDs []string, options ContainerMountOptions) ([]*ContainerMountReport, error) + ContainerPause(ctx context.Context, namesOrIds []string, options PauseUnPauseOptions) ([]*PauseUnpauseReport, error) + ContainerPort(ctx context.Context, nameOrID string, options ContainerPortOptions) ([]*ContainerPortReport, error) + ContainerPrune(ctx context.Context, options ContainerPruneOptions) ([]*reports.PruneReport, error) + ContainerRename(ctr context.Context, nameOrID string, options ContainerRenameOptions) error + ContainerRestart(ctx context.Context, namesOrIds []string, options RestartOptions) ([]*RestartReport, error) + ContainerRestore(ctx context.Context, namesOrIds []string, options RestoreOptions) ([]*RestoreReport, error) + ContainerRm(ctx context.Context, namesOrIds []string, options RmOptions) ([]*reports.RmReport, error) + ContainerRun(ctx context.Context, opts ContainerRunOptions) (*ContainerRunReport, error) + ContainerRunlabel(ctx context.Context, label string, image string, args []string, opts ContainerRunlabelOptions) error + ContainerStart(ctx context.Context, namesOrIds []string, options ContainerStartOptions) ([]*ContainerStartReport, error) + ContainerStat(ctx context.Context, nameOrDir string, path string) (*ContainerStatReport, error) + ContainerStats(ctx context.Context, namesOrIds []string, options ContainerStatsOptions) (chan ContainerStatsReport, error) + ContainerStop(ctx context.Context, namesOrIds []string, options StopOptions) ([]*StopReport, error) + ContainerTop(ctx context.Context, options TopOptions) (*StringSliceReport, error) + ContainerUnmount(ctx context.Context, nameOrIDs []string, options ContainerUnmountOptions) ([]*ContainerUnmountReport, error) + ContainerUnpause(ctx context.Context, namesOrIds []string, options PauseUnPauseOptions) ([]*PauseUnpauseReport, error) + ContainerWait(ctx context.Context, namesOrIds []string, options WaitOptions) ([]WaitReport, error) + Diff(ctx context.Context, namesOrIds []string, options DiffOptions) (*DiffReport, error) + Events(ctx context.Context, opts EventsOptions) error + GenerateSystemd(ctx context.Context, nameOrID string, opts GenerateSystemdOptions) (*GenerateSystemdReport, error) + GenerateKube(ctx context.Context, nameOrIDs []string, opts GenerateKubeOptions) (*GenerateKubeReport, error) + SystemPrune(ctx context.Context, options SystemPruneOptions) (*SystemPruneReport, error) + HealthCheckRun(ctx context.Context, nameOrID string, options HealthCheckOptions) (*define.HealthCheckResults, error) + Info(ctx context.Context) (*define.Info, error) + NetworkConnect(ctx context.Context, networkname string, options NetworkConnectOptions) error + NetworkCreate(ctx context.Context, network types.Network) (*types.Network, error) + NetworkDisconnect(ctx context.Context, networkname string, options NetworkDisconnectOptions) error + NetworkExists(ctx context.Context, networkname string) (*BoolReport, error) + NetworkInspect(ctx context.Context, namesOrIds []string, options InspectOptions) ([]types.Network, []error, error) + NetworkList(ctx context.Context, options NetworkListOptions) ([]types.Network, error) + NetworkPrune(ctx context.Context, options NetworkPruneOptions) ([]*NetworkPruneReport, error) + NetworkReload(ctx 
context.Context, names []string, options NetworkReloadOptions) ([]*NetworkReloadReport, error) + NetworkRm(ctx context.Context, namesOrIds []string, options NetworkRmOptions) ([]*NetworkRmReport, error) + PlayKube(ctx context.Context, body io.Reader, opts PlayKubeOptions) (*PlayKubeReport, error) + PlayKubeDown(ctx context.Context, body io.Reader, opts PlayKubeDownOptions) (*PlayKubeReport, error) + PodCreate(ctx context.Context, specg PodSpec) (*PodCreateReport, error) + PodExists(ctx context.Context, nameOrID string) (*BoolReport, error) + PodInspect(ctx context.Context, options PodInspectOptions) (*PodInspectReport, error) + PodKill(ctx context.Context, namesOrIds []string, options PodKillOptions) ([]*PodKillReport, error) + PodLogs(ctx context.Context, pod string, options PodLogsOptions) error + PodPause(ctx context.Context, namesOrIds []string, options PodPauseOptions) ([]*PodPauseReport, error) + PodPrune(ctx context.Context, options PodPruneOptions) ([]*PodPruneReport, error) + PodPs(ctx context.Context, options PodPSOptions) ([]*ListPodsReport, error) + PodRestart(ctx context.Context, namesOrIds []string, options PodRestartOptions) ([]*PodRestartReport, error) + PodRm(ctx context.Context, namesOrIds []string, options PodRmOptions) ([]*PodRmReport, error) + PodStart(ctx context.Context, namesOrIds []string, options PodStartOptions) ([]*PodStartReport, error) + PodStats(ctx context.Context, namesOrIds []string, options PodStatsOptions) ([]*PodStatsReport, error) + PodStop(ctx context.Context, namesOrIds []string, options PodStopOptions) ([]*PodStopReport, error) + PodTop(ctx context.Context, options PodTopOptions) (*StringSliceReport, error) + PodUnpause(ctx context.Context, namesOrIds []string, options PodunpauseOptions) ([]*PodUnpauseReport, error) + SetupRootless(ctx context.Context, noMoveProcess bool) error + SecretCreate(ctx context.Context, name string, reader io.Reader, options SecretCreateOptions) (*SecretCreateReport, error) + SecretInspect(ctx context.Context, nameOrIDs []string) ([]*SecretInfoReport, []error, error) + SecretList(ctx context.Context, opts SecretListRequest) ([]*SecretInfoReport, error) + SecretRm(ctx context.Context, nameOrID []string, opts SecretRmOptions) ([]*SecretRmReport, error) + Shutdown(ctx context.Context) + SystemDf(ctx context.Context, options SystemDfOptions) (*SystemDfReport, error) + Unshare(ctx context.Context, args []string, options SystemUnshareOptions) error + Version(ctx context.Context) (*SystemVersionReport, error) + VolumeCreate(ctx context.Context, opts VolumeCreateOptions) (*IDOrNameResponse, error) + VolumeExists(ctx context.Context, namesOrID string) (*BoolReport, error) + VolumeMounted(ctx context.Context, namesOrID string) (*BoolReport, error) + VolumeInspect(ctx context.Context, namesOrIds []string, opts InspectOptions) ([]*VolumeInspectReport, []error, error) + VolumeList(ctx context.Context, opts VolumeListOptions) ([]*VolumeListReport, error) + VolumeMount(ctx context.Context, namesOrIds []string) ([]*VolumeMountReport, error) + VolumePrune(ctx context.Context, options VolumePruneOptions) ([]*reports.PruneReport, error) + VolumeRm(ctx context.Context, namesOrIds []string, opts VolumeRmOptions) ([]*VolumeRmReport, error) + VolumeUnmount(ctx context.Context, namesOrIds []string) ([]*VolumeUnmountReport, error) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_image.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_image.go new file mode 100644 index 00000000000..5011d82aa79 
--- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_image.go @@ -0,0 +1,43 @@ +package entities + +import ( + "context" + + "github.com/containers/common/pkg/config" + "github.com/containers/podman/v4/pkg/domain/entities/reports" +) + +type ImageEngine interface { + Build(ctx context.Context, containerFiles []string, opts BuildOptions) (*BuildReport, error) + Config(ctx context.Context) (*config.Config, error) + Exists(ctx context.Context, nameOrID string) (*BoolReport, error) + History(ctx context.Context, nameOrID string, opts ImageHistoryOptions) (*ImageHistoryReport, error) + Import(ctx context.Context, opts ImageImportOptions) (*ImageImportReport, error) + Inspect(ctx context.Context, namesOrIDs []string, opts InspectOptions) ([]*ImageInspectReport, []error, error) + List(ctx context.Context, opts ImageListOptions) ([]*ImageSummary, error) + Load(ctx context.Context, opts ImageLoadOptions) (*ImageLoadReport, error) + Mount(ctx context.Context, images []string, options ImageMountOptions) ([]*ImageMountReport, error) + Prune(ctx context.Context, opts ImagePruneOptions) ([]*reports.PruneReport, error) + Pull(ctx context.Context, rawImage string, opts ImagePullOptions) (*ImagePullReport, error) + Push(ctx context.Context, source string, destination string, opts ImagePushOptions) error + Remove(ctx context.Context, images []string, opts ImageRemoveOptions) (*ImageRemoveReport, []error) + Save(ctx context.Context, nameOrID string, tags []string, options ImageSaveOptions) error + Search(ctx context.Context, term string, opts ImageSearchOptions) ([]ImageSearchReport, error) + SetTrust(ctx context.Context, args []string, options SetTrustOptions) error + ShowTrust(ctx context.Context, args []string, options ShowTrustOptions) (*ShowTrustReport, error) + Shutdown(ctx context.Context) + Tag(ctx context.Context, nameOrID string, tags []string, options ImageTagOptions) error + Transfer(ctx context.Context, source ImageScpOptions, dest ImageScpOptions, parentFlags []string) error + Tree(ctx context.Context, nameOrID string, options ImageTreeOptions) (*ImageTreeReport, error) + Unmount(ctx context.Context, images []string, options ImageUnmountOptions) ([]*ImageUnmountReport, error) + Untag(ctx context.Context, nameOrID string, tags []string, options ImageUntagOptions) error + ManifestCreate(ctx context.Context, name string, images []string, opts ManifestCreateOptions) (string, error) + ManifestExists(ctx context.Context, name string) (*BoolReport, error) + ManifestInspect(ctx context.Context, name string) ([]byte, error) + ManifestAdd(ctx context.Context, listName string, imageNames []string, opts ManifestAddOptions) (string, error) + ManifestAnnotate(ctx context.Context, names, image string, opts ManifestAnnotateOptions) (string, error) + ManifestRemoveDigest(ctx context.Context, names, image string) (string, error) + ManifestRm(ctx context.Context, names []string) (*ImageRemoveReport, []error) + ManifestPush(ctx context.Context, name, destination string, imagePushOpts ImagePushOptions) (string, error) + Sign(ctx context.Context, names []string, options SignOptions) (*SignReport, error) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_system.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_system.go new file mode 100644 index 00000000000..a0ecfe9ea0c --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_system.go @@ -0,0 +1,14 @@ +package entities + +import ( + "context" + + 
"github.com/spf13/pflag" +) + +type SystemEngine interface { + Renumber(ctx context.Context, flags *pflag.FlagSet, config *PodmanConfig) error + Migrate(ctx context.Context, flags *pflag.FlagSet, config *PodmanConfig, options SystemMigrateOptions) error + Reset(ctx context.Context) error + Shutdown(ctx context.Context) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/events.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/events.go new file mode 100644 index 00000000000..d8ba0f1d3ea --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/events.go @@ -0,0 +1,77 @@ +package entities + +import ( + "strconv" + "time" + + libpodEvents "github.com/containers/podman/v4/libpod/events" + dockerEvents "github.com/docker/docker/api/types/events" +) + +// Event combines various event-related data such as time, event type, status +// and more. +type Event struct { + // TODO: it would be nice to have full control over the types at some + // point and fork such Docker types. + dockerEvents.Message +} + +// ConvertToLibpodEvent converts an entities event to a libpod one. +func ConvertToLibpodEvent(e Event) *libpodEvents.Event { + exitCode, err := strconv.Atoi(e.Actor.Attributes["containerExitCode"]) + if err != nil { + return nil + } + status, err := libpodEvents.StringToStatus(e.Action) + if err != nil { + return nil + } + t, err := libpodEvents.StringToType(e.Type) + if err != nil { + return nil + } + image := e.Actor.Attributes["image"] + name := e.Actor.Attributes["name"] + details := e.Actor.Attributes + delete(details, "image") + delete(details, "name") + delete(details, "containerExitCode") + return &libpodEvents.Event{ + ContainerExitCode: exitCode, + ID: e.Actor.ID, + Image: image, + Name: name, + Status: status, + Time: time.Unix(0, e.TimeNano), + Type: t, + Details: libpodEvents.Details{ + Attributes: details, + }, + } +} + +// ConvertToEntitiesEvent converts a libpod event to an entities one. 
+func ConvertToEntitiesEvent(e libpodEvents.Event) *Event { + attributes := e.Details.Attributes + if attributes == nil { + attributes = make(map[string]string) + } + attributes["image"] = e.Image + attributes["name"] = e.Name + attributes["containerExitCode"] = strconv.Itoa(e.ContainerExitCode) + return &Event{dockerEvents.Message{ + // Compatibility with clients that still look for deprecated API elements + Status: e.Status.String(), + ID: e.ID, + From: e.Image, + Type: e.Type.String(), + Action: e.Status.String(), + Actor: dockerEvents.Actor{ + ID: e.ID, + Attributes: attributes, + }, + Scope: "local", + Time: e.Time.Unix(), + TimeNano: e.Time.UnixNano(), + }} +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/filters.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/filters.go new file mode 100644 index 00000000000..a42c5cd244b --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/filters.go @@ -0,0 +1,144 @@ +package entities + +import ( + "net/url" + "strings" +) + +// Identifier interface allows filters to access ID() of object +type Identifier interface { + Id() string +} + +// Named interface allows filters to access Name() of object +type Named interface { + Name() string +} + +// Names interface allows filters to access Name() of object +type Names interface { + Names() []string +} + +// IDOrNamed interface allows filters to access ID() or Name() of object +type IDOrNamed interface { + Identifier + Named +} + +type ImageFilter func(Image) bool +type VolumeFilter func(Volume) bool +type ContainerFilter func(Container) bool + +func CompileImageFilters(filters url.Values) ImageFilter { + var fns []interface{} + + for name, targets := range filters { + switch name { + case "id": + fns = append(fns, FilterIDFn(targets)) + case "name": + fns = append(fns, FilterNamesFn(targets)) + case "idOrName": + fns = append(fns, FilterIDOrNameFn(targets)) + } + } + + return func(image Image) bool { + for _, fn := range fns { + if !fn.(ImageFilter)(image) { + return false + } + } + return true + } +} + +func CompileContainerFilters(filters url.Values) ContainerFilter { + var fns []interface{} + + for name, targets := range filters { + switch name { + case "id": + fns = append(fns, FilterIDFn(targets)) + case "name": + fns = append(fns, FilterNameFn(targets)) + case "idOrName": + fns = append(fns, FilterIDOrNameFn(targets)) + } + } + + return func(ctnr Container) bool { + for _, fn := range fns { + if !fn.(ContainerFilter)(ctnr) { + return false + } + } + return true + } +} + +func CompileVolumeFilters(filters url.Values) VolumeFilter { + var fns []interface{} + + for name, targets := range filters { + if name == "id" { + fns = append(fns, FilterIDFn(targets)) + } + } + + return func(volume Volume) bool { + for _, fn := range fns { + if !fn.(VolumeFilter)(volume) { + return false + } + } + return true + } +} + +func FilterIDFn(id []string) func(Identifier) bool { + return func(obj Identifier) bool { + for _, v := range id { + if strings.Contains(obj.Id(), v) { + return true + } + } + return false + } +} + +func FilterNameFn(name []string) func(Named) bool { + return func(obj Named) bool { + for _, v := range name { + if strings.Contains(obj.Name(), v) { + return true + } + } + return false + } +} + +func FilterNamesFn(name []string) func(Names) bool { + return func(obj Names) bool { + for _, v := range name { + for _, n := range obj.Names() { + if strings.Contains(n, v) { + return true + } + } + } + return false + } +} + +func 
FilterIDOrNameFn(id []string) func(IDOrNamed) bool {
+ return func(obj IDOrNamed) bool {
+ for _, v := range id {
+ if strings.Contains(obj.Id(), v) || strings.Contains(obj.Name(), v) {
+ return true
+ }
+ }
+ return false
+ }
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go
new file mode 100644
index 00000000000..73dd64ecd02
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go
@@ -0,0 +1,55 @@
+package entities
+
+import "io"
+
+// GenerateSystemdOptions control the generation of systemd unit files.
+type GenerateSystemdOptions struct {
+ // Name - use container/pod name instead of its ID.
+ Name bool
+ // New - create a new container instead of starting an existing one.
+ New bool
+ // RestartPolicy - systemd restart policy.
+ RestartPolicy *string
+ // RestartSec - systemd service restartsec. Configures the time to sleep before restarting a service.
+ RestartSec *uint
+ // StartTimeout - time when starting the container.
+ StartTimeout *uint
+ // StopTimeout - time when stopping the container.
+ StopTimeout *uint
+ // ContainerPrefix - systemd unit name prefix for containers
+ ContainerPrefix string
+ // PodPrefix - systemd unit name prefix for pods
+ PodPrefix string
+ // Separator - systemd unit name separator between name/id and prefix
+ Separator string
+ // NoHeader - skip header generation
+ NoHeader bool
+ // TemplateUnitFile - make use of %i and %I to differentiate between the different instances of the unit
+ TemplateUnitFile bool
+ // Wants - systemd wants list for the container or pods
+ Wants []string
+ // After - systemd after list for the container or pods
+ After []string
+ // Requires - systemd requires list for the container or pods
+ Requires []string
+}
+
+// GenerateSystemdReport
+type GenerateSystemdReport struct {
+ // Units of the generate process. key = unit name -> value = unit content
+ Units map[string]string
+}
+
+// GenerateKubeOptions control the generation of Kubernetes YAML files.
+type GenerateKubeOptions struct {
+ // Service - generate YAML for a Kubernetes _service_ object.
+ Service bool
+}
+
+// GenerateKubeReport
+//
+// FIXME: Podman4.0 should change io.Reader to io.ReadCloser
+type GenerateKubeReport struct {
+ // Reader - the io.Reader to read the generated YAML file.
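+ // A hedged illustration (an assumption, not upstream documentation):
+ // callers typically just drain it, e.g.
+ //
+ //	yamlBytes, err := io.ReadAll(report.Reader)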
+ Reader io.Reader +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/healthcheck.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/healthcheck.go new file mode 100644 index 00000000000..a880805f9d8 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/healthcheck.go @@ -0,0 +1,3 @@ +package entities + +type HealthCheckOptions struct{} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go new file mode 100644 index 00000000000..7081c5d25a5 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go @@ -0,0 +1,412 @@ +package entities + +import ( + "net/url" + "time" + + "github.com/containers/common/pkg/config" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/containers/podman/v4/pkg/inspect" + "github.com/containers/podman/v4/pkg/trust" + "github.com/docker/docker/api/types/container" + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type Image struct { + ID string `json:"Id"` + RepoTags []string `json:",omitempty"` + RepoDigests []string `json:",omitempty"` + Parent string `json:",omitempty"` + Comment string `json:",omitempty"` + Created string `json:",omitempty"` + Container string `json:",omitempty"` + ContainerConfig *container.Config `json:",omitempty"` + DockerVersion string `json:",omitempty"` + Author string `json:",omitempty"` + Config *container.Config `json:",omitempty"` + Architecture string `json:",omitempty"` + Variant string `json:",omitempty"` + Os string `json:",omitempty"` + OsVersion string `json:",omitempty"` + Size int64 `json:",omitempty"` + VirtualSize int64 `json:",omitempty"` + GraphDriver string `json:",omitempty"` + RootFS string `json:",omitempty"` + Metadata string `json:",omitempty"` + + // Podman extensions + Digest digest.Digest `json:",omitempty"` + PodmanVersion string `json:",omitempty"` + ManifestType string `json:",omitempty"` + User string `json:",omitempty"` + History []v1.History `json:",omitempty"` + NamesHistory []string `json:",omitempty"` + HealthCheck *manifest.Schema2HealthConfig `json:",omitempty"` +} + +func (i *Image) Id() string { // nolint + return i.ID +} + +// swagger:model LibpodImageSummary +type ImageSummary struct { + ID string `json:"Id"` + ParentId string // nolint + RepoTags []string + RepoDigests []string + Created int64 + Size int64 + SharedSize int + VirtualSize int64 + Labels map[string]string + Containers int + ReadOnly bool `json:",omitempty"` + Dangling bool `json:",omitempty"` + + // Podman extensions + Names []string `json:",omitempty"` + Digest string `json:",omitempty"` + ConfigDigest string `json:",omitempty"` + History []string `json:",omitempty"` +} + +func (i *ImageSummary) Id() string { // nolint + return i.ID +} + +func (i *ImageSummary) IsReadOnly() bool { + return i.ReadOnly +} + +func (i *ImageSummary) IsDangling() bool { + return i.Dangling +} + +// ImageRemoveOptions can be used to alter image removal. +type ImageRemoveOptions struct { + // All will remove all images. + All bool + // Foce will force image removal including containers using the images. + Force bool + // Ignore if a specified image does not exist and do not throw an error. + Ignore bool + // Confirms if given name is a manifest list and removes it, otherwise returns error. 
+ LookupManifest bool +} + +// ImageRemoveReport is the response for removing one or more image(s) from storage +// and images what was untagged vs actually removed. +type ImageRemoveReport struct { + // Deleted images. + Deleted []string `json:",omitempty"` + // Untagged images. Can be longer than Deleted. + Untagged []string `json:",omitempty"` + // ExitCode describes the exit codes as described in the `podman rmi` + // man page. + ExitCode int +} + +type ImageHistoryOptions struct{} + +type ImageHistoryLayer struct { + ID string `json:"id"` + Created time.Time `json:"created,omitempty"` + CreatedBy string `json:",omitempty"` + Tags []string `json:"tags,omitempty"` + Size int64 `json:"size"` + Comment string `json:"comment,omitempty"` +} + +type ImageHistoryReport struct { + Layers []ImageHistoryLayer +} + +// ImagePullOptions are the arguments for pulling images. +type ImagePullOptions struct { + // AllTags can be specified to pull all tags of an image. Note + // that this only works if the image does not include a tag. + AllTags bool + // Authfile is the path to the authentication file. Ignored for remote + // calls. + Authfile string + // CertDir is the path to certificate directories. Ignored for remote + // calls. + CertDir string + // Username for authenticating against the registry. + Username string + // Password for authenticating against the registry. + Password string + // Arch will overwrite the local architecture for image pulls. + Arch string + // OS will overwrite the local operating system (OS) for image + // pulls. + OS string + // Variant will overwrite the local variant for image pulls. + Variant string + // Quiet can be specified to suppress pull progress when pulling. Ignored + // for remote calls. + Quiet bool + // SignaturePolicy to use when pulling. Ignored for remote calls. + SignaturePolicy string + // SkipTLSVerify to skip HTTPS and certificate verification. + SkipTLSVerify types.OptionalBool + // PullPolicy whether to pull new image + PullPolicy config.PullPolicy +} + +// ImagePullReport is the response from pulling one or more images. +type ImagePullReport struct { + // Stream used to provide output from c/image + Stream string `json:"stream,omitempty"` + // Error contains text of errors from c/image + Error string `json:"error,omitempty"` + // Images contains the ID's of the images pulled + Images []string `json:"images,omitempty"` + // ID contains image id (retained for backwards compatibility) + ID string `json:"id,omitempty"` +} + +// ImagePushOptions are the arguments for pushing images. +type ImagePushOptions struct { + // All indicates that all images referenced in an manifest list should be pushed + All bool + // Authfile is the path to the authentication file. Ignored for remote + // calls. + Authfile string + // CertDir is the path to certificate directories. Ignored for remote + // calls. + CertDir string + // Compress tarball image layers when pushing to a directory using the 'dir' + // transport. Default is same compression type as source. Ignored for remote + // calls. + Compress bool + // Username for authenticating against the registry. + Username string + // Password for authenticating against the registry. + Password string + // DigestFile, after copying the image, write the digest of the resulting + // image to the file. Ignored for remote calls. + DigestFile string + // Format is the Manifest type (oci, v2s1, or v2s2) to use when pushing an + // image. Default is manifest type of source, with fallbacks. + // Ignored for remote calls. 
+ Format string + // Quiet can be specified to suppress pull progress when pulling. Ignored + // for remote calls. + Quiet bool + // Rm indicates whether to remove the manifest list if push succeeds + Rm bool + // RemoveSignatures, discard any pre-existing signatures in the image. + // Ignored for remote calls. + RemoveSignatures bool + // SignaturePolicy to use when pulling. Ignored for remote calls. + SignaturePolicy string + // SignBy adds a signature at the destination using the specified key. + // Ignored for remote calls. + SignBy string + // SkipTLSVerify to skip HTTPS and certificate verification. + SkipTLSVerify types.OptionalBool + // Progress to get progress notifications + Progress chan types.ProgressProperties + // CompressionFormat is the format to use for the compression of the blobs + CompressionFormat string +} + +// ImageSearchOptions are the arguments for searching images. +type ImageSearchOptions struct { + // Authfile is the path to the authentication file. Ignored for remote + // calls. + Authfile string + // Filters for the search results. + Filters []string + // Limit the number of results. + Limit int + // SkipTLSVerify to skip HTTPS and certificate verification. + SkipTLSVerify types.OptionalBool + // ListTags search the available tags of the repository + ListTags bool +} + +// ImageSearchReport is the response from searching images. +type ImageSearchReport struct { + // Index is the image index (e.g., "docker.io" or "quay.io") + Index string + // Name is the canonical name of the image (e.g., "docker.io/library/alpine"). + Name string + // Description of the image. + Description string + // Stars is the number of stars of the image. + Stars int + // Official indicates if it's an official image. + Official string + // Automated indicates if the image was created by an automated build. + Automated string + // Tag is the repository tag + Tag string +} + +// Image List Options +type ImageListOptions struct { + All bool `json:"all" schema:"all"` + Filter []string `json:"Filter,omitempty"` +} + +type ImagePruneOptions struct { + All bool `json:"all" schema:"all"` + External bool `json:"external" schema:"external"` + Filter []string `json:"filter" schema:"filter"` +} + +type ImageTagOptions struct{} +type ImageUntagOptions struct{} + +// ImageInspectReport is the data when inspecting an image. +type ImageInspectReport struct { + *inspect.ImageData +} + +type ImageLoadOptions struct { + Input string + Quiet bool + SignaturePolicy string +} + +type ImageLoadReport struct { + Names []string +} + +type ImageImportOptions struct { + Architecture string + Variant string + Changes []string + Message string + OS string + Quiet bool + Reference string + SignaturePolicy string + Source string + SourceIsURL bool +} + +type ImageImportReport struct { + Id string // nolint +} + +// ImageSaveOptions provide options for saving images. +type ImageSaveOptions struct { + // Compress layers when saving to a directory. + Compress bool + // Format of saving the image: oci-archive, oci-dir (directory with oci + // manifest type), docker-archive, docker-dir (directory with v2s2 + // manifest type). + Format string + // MultiImageArchive denotes if the created archive shall include more + // than one image. Additional tags will be interpreted as references + // to images which are added to the archive. + MultiImageArchive bool + // Accept uncompressed layers when copying OCI images. + OciAcceptUncompressedLayers bool + // Output - write image to the specified path. 
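+ // An illustrative pairing (an assumption, not upstream documentation):
+ //
+ //	opts := entities.ImageSaveOptions{
+ //		Format: "docker-archive",
+ //		Output: "alpine.tar",
+ //	}
+ //
+ // roughly corresponds to `podman save --format docker-archive -o alpine.tar`.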
+ Output string + // Quiet - suppress output when copying images + Quiet bool +} + +// ImageScpOptions provide options for securely copying images to and from a remote host +type ImageScpOptions struct { + // Remote determines if this entity is operating on a remote machine + Remote bool `json:"remote,omitempty"` + // File is the input/output file for the save and load Operation + File string `json:"file,omitempty"` + // Quiet Determines if the save and load operation will be done quietly + Quiet bool `json:"quiet,omitempty"` + // Image is the image the user is providing to save and load + Image string `json:"image,omitempty"` + // User is used in conjunction with Transfer to determine if a valid user was given to save from/load into + User string `json:"user,omitempty"` +} + +// ImageScpConnections provides the ssh related information used in remote image transfer +type ImageScpConnections struct { + // Connections holds the raw string values for connections (ssh or unix) + Connections []string + // URI contains the ssh connection URLs to be used by the client + URI []*url.URL + // Identities contains ssh identity keys to be used by the client + Identities []string +} + +// ImageTreeOptions provides options for ImageEngine.Tree() +type ImageTreeOptions struct { + WhatRequires bool // Show all child images and layers of the specified image +} + +// ImageTreeReport provides results from ImageEngine.Tree() +type ImageTreeReport struct { + Tree string // TODO: Refactor move presentation work out of server +} + +// ShowTrustOptions are the cli options for showing trust +type ShowTrustOptions struct { + JSON bool + PolicyPath string + Raw bool + RegistryPath string +} + +// ShowTrustReport describes the results of show trust +type ShowTrustReport struct { + Raw []byte + SystemRegistriesDirPath string + JSONOutput []byte + Policies []*trust.Policy +} + +// SetTrustOptions describes the CLI options for setting trust +type SetTrustOptions struct { + PolicyPath string + PubKeysFile []string + Type string +} + +// SignOptions describes input options for the CLI signing +type SignOptions struct { + Directory string + SignBy string + CertDir string + Authfile string + All bool +} + +// SignReport describes the result of signing +type SignReport struct{} + +// ImageMountOptions describes the input values for mounting images +// in the CLI +type ImageMountOptions struct { + All bool + Format string +} + +// ImageUnmountOptions are the options from the cli for unmounting +type ImageUnmountOptions struct { + All bool + Force bool +} + +// ImageMountReport describes the response from image mount +type ImageMountReport struct { + Err error + Id string // nolint + Name string + Repositories []string + Path string +} + +// ImageUnmountReport describes the response from umounting an image +type ImageUnmountReport struct { + Err error + Id string // nolint +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/manifest.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/manifest.go new file mode 100644 index 00000000000..81f3e837bb8 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/manifest.go @@ -0,0 +1,80 @@ +package entities + +import "github.com/containers/image/v5/types" + +// ManifestCreateOptions provides model for creating manifest +type ManifestCreateOptions struct { + All bool `schema:"all"` +} + +// ManifestAddOptions provides model for adding digests to manifest list +// +// swagger:model +type ManifestAddOptions struct { + 
ManifestAnnotateOptions + // True when operating on a list to include all images + All bool `json:"all" schema:"all"` + // authfile to use when pushing manifest list + Authfile string `json:"-" schema:"-"` + // Home directory for certificates when pushing a manifest list + CertDir string `json:"-" schema:"-"` + // Password to authenticate to registry when pushing manifest list + Password string `json:"-" schema:"-"` + // Should TLS registry certificate be verified? + SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` + // Username to authenticate to registry when pushing manifest list + Username string `json:"-" schema:"-"` + // Images is an optional list of images to add to manifest list + Images []string `json:"images" schema:"images"` +} + +// ManifestAnnotateOptions provides model for annotating manifest list +type ManifestAnnotateOptions struct { + // Annotation to add to manifest list + Annotation []string `json:"annotation" schema:"annotation"` + // Arch overrides the architecture for the image + Arch string `json:"arch" schema:"arch"` + // Feature list for the image + Features []string `json:"features" schema:"features"` + // OS overrides the operating system for the image + OS string `json:"os" schema:"os"` + // OS features for the image + OSFeatures []string `json:"os_features" schema:"os_features"` + // OSVersion overrides the operating system for the image + OSVersion string `json:"os_version" schema:"os_version"` + // Variant for the image + Variant string `json:"variant" schema:"variant"` +} + +// ManifestModifyOptions provides the model for mutating a manifest +// +// swagger 2.0 does not support oneOf for schema validation. +// +// Operation "update" uses all fields. +// Operation "remove" uses fields: Operation and Images +// Operation "annotate" uses fields: Operation and Annotations +// +// swagger:model +type ManifestModifyOptions struct { + Operation string `json:"operation" schema:"operation"` // Valid values: update, remove, annotate + ManifestAddOptions + ManifestRemoveOptions +} + +// ManifestRemoveOptions provides the model for removing digests from a manifest +// +// swagger:model +type ManifestRemoveOptions struct { +} + +// ManifestModifyReport provides the model for removed digests and changed manifest +// +// swagger:model +type ManifestModifyReport struct { + // Manifest List ID + ID string `json:"Id"` + // Images to removed from manifest list, otherwise not provided. + Images []string `json:"images,omitempty" schema:"images"` + // Errors associated with operation + Errors []error `json:"errors,omitempty"` +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/network.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/network.go new file mode 100644 index 00000000000..0f901c7f14d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/network.go @@ -0,0 +1,88 @@ +package entities + +import ( + "net" + + "github.com/containers/common/libnetwork/types" +) + +// NetworkListOptions describes options for listing networks in cli +type NetworkListOptions struct { + Format string + Quiet bool + Filters map[string][]string +} + +// NetworkReloadOptions describes options for reloading container network +// configuration. +type NetworkReloadOptions struct { + All bool + Latest bool +} + +// NetworkReloadReport describes the results of reloading a container network. 
+type NetworkReloadReport struct { + // nolint:stylecheck,revive + Id string + Err error +} + +// NetworkRmOptions describes options for removing networks +type NetworkRmOptions struct { + Force bool + Timeout *uint +} + +// NetworkRmReport describes the results of network removal +type NetworkRmReport struct { + Name string + Err error +} + +// NetworkCreateOptions describes options to create a network +type NetworkCreateOptions struct { + DisableDNS bool + Driver string + Gateways []net.IP + Internal bool + Labels map[string]string + MacVLAN string + Ranges []string + Subnets []string + IPv6 bool + // Mapping of driver options and values. + Options map[string]string +} + +// NetworkCreateReport describes a created network for the cli +type NetworkCreateReport struct { + Name string +} + +// NetworkDisconnectOptions describes options for disconnecting +// containers from networks +type NetworkDisconnectOptions struct { + Container string + Force bool +} + +// NetworkConnectOptions describes options for connecting +// a container to a network +type NetworkConnectOptions struct { + Container string `json:"container"` + types.PerNetworkOptions +} + +// NetworkPruneReport containers the name of network and an error +// associated in its pruning (removal) +// swagger:model NetworkPruneReport +type NetworkPruneReport struct { + Name string + Error error +} + +// NetworkPruneOptions describes options for pruning +// unused cni networks +type NetworkPruneOptions struct { + Filters map[string][]string +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go new file mode 100644 index 00000000000..c9dc3f08c2d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go @@ -0,0 +1,96 @@ +package entities + +import ( + "net" + + "github.com/containers/image/v5/types" +) + +// PlayKubeOptions controls playing kube YAML files. +type PlayKubeOptions struct { + // Annotations - Annotations to add to Pods + Annotations map[string]string + // Authfile - path to an authentication file. + Authfile string + // Indicator to build all images with Containerfile or Dockerfile + Build types.OptionalBool + // CertDir - to a directory containing TLS certifications and keys. + CertDir string + // ContextDir - directory containing image contexts used for Build + ContextDir string + // Down indicates whether to bring contents of a yaml file "down" + // as in stop + Down bool + // Replace indicates whether to delete and recreate a yaml file + Replace bool + // Do not create /etc/hosts within the pod's containers, + // instead use the version from the image + NoHosts bool + // Username for authenticating against the registry. + Username string + // Password for authenticating against the registry. + Password string + // Networks - name of the network to connect to. + Networks []string + // Quiet - suppress output when pulling images. + Quiet bool + // SignaturePolicy - path to a signature-policy file. + SignaturePolicy string + // SkipTLSVerify - skip https and certificate validation when + // contacting container registries. + SkipTLSVerify types.OptionalBool + // SeccompProfileRoot - path to a directory containing seccomp + // profiles. + SeccompProfileRoot string + // StaticIPs - Static IP address used by the pod(s). + StaticIPs []net.IP + // StaticMACs - Static MAC address used by the pod(s). + StaticMACs []net.HardwareAddr + // ConfigMaps - slice of pathnames to kubernetes configmap YAMLs. 
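+ // (Illustrative assumption: e.g. ConfigMaps: []string{"./configmap.yaml"};
+ // each file is parsed as a Kubernetes ConfigMap for the played pods.)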
+ ConfigMaps []string
+ // LogDriver for the container. For example: journald
+ LogDriver string
+ // LogOptions for the log driver for the container.
+ LogOptions []string
+ // Start - don't start the pod if false
+ Start types.OptionalBool
+}
+
+// PlayKubePod represents a single pod and associated containers created by play kube
+type PlayKubePod struct {
+ // ID - ID of the pod created as a result of play kube.
+ ID string
+ // Containers - the IDs of the containers running in the created pod.
+ Containers []string
+ // InitContainers - the IDs of the init containers to be run in the created pod.
+ InitContainers []string
+ // Logs - non-fatal errors and log messages while processing.
+ Logs []string
+ // ContainerErrors - any errors that occurred while starting containers
+ // in the pod.
+ ContainerErrors []string
+}
+
+// PlayKubeVolume represents a single volume created by play kube.
+type PlayKubeVolume struct {
+ // Name - Name of the volume created by play kube.
+ Name string
+}
+
+// PlayKubeReport contains the results of running play kube.
+type PlayKubeReport struct {
+ // Pods - pods created by play kube.
+ Pods []PlayKubePod
+ // Volumes - volumes created by play kube.
+ Volumes []PlayKubeVolume
+ PlayKubeTeardown
+}
+
+// PlayKubeDownOptions are options for tearing down pods
+type PlayKubeDownOptions struct{}
+
+// PlayKubeTeardown contains the results of tearing down play kube
+type PlayKubeTeardown struct {
+ StopReport []*PodStopReport
+ RmReport []*PodRmReport
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/pods.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/pods.go
new file mode 100644
index 00000000000..cac961cf16f
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/pods.go
@@ -0,0 +1,511 @@
+package entities
+
+import (
+ "errors"
+ "strings"
+ "time"
+
+ commonFlag "github.com/containers/common/pkg/flag"
+ "github.com/containers/podman/v4/libpod/define"
+ "github.com/containers/podman/v4/pkg/specgen"
+ "github.com/containers/podman/v4/pkg/util"
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+type PodKillOptions struct {
+ All bool
+ Latest bool
+ Signal string
+}
+
+type PodKillReport struct {
+ Errs []error
+ Id string // nolint
+}
+
+type ListPodsReport struct {
+ Cgroup string
+ Containers []*ListPodContainer
+ Created time.Time
+ Id string // nolint
+ InfraId string // nolint
+ Name string
+ Namespace string
+ // Network names connected to infra container
+ Networks []string
+ Status string
+ Labels map[string]string
+}
+
+type ListPodContainer struct {
+ Id string // nolint
+ Names string
+ Status string
+}
+
+type PodPauseOptions struct {
+ All bool
+ Latest bool
+}
+
+type PodPauseReport struct {
+ Errs []error
+ Id string // nolint
+}
+
+type PodunpauseOptions struct {
+ All bool
+ Latest bool
+}
+
+type PodUnpauseReport struct {
+ Errs []error
+ Id string // nolint
+}
+
+type PodStopOptions struct {
+ All bool
+ Ignore bool
+ Latest bool
+ Timeout int
+}
+
+type PodStopReport struct {
+ Errs []error
+ Id string // nolint
+}
+
+type PodRestartOptions struct {
+ All bool
+ Latest bool
+}
+
+type PodRestartReport struct {
+ Errs []error
+ Id string // nolint
+}
+
+type PodStartOptions struct {
+ All bool
+ Latest bool
+}
+
+type PodStartReport struct {
+ Errs []error
+ Id string // nolint
+}
+
+type PodRmOptions struct {
+ All bool
+ Force bool
+ Ignore bool
+ Latest bool
+ Timeout *uint
+}
+
+type PodRmReport struct {
+ Err error
+ Id string // nolint
+}
+
+// PodSpec is an
+// PodSpec is an abstracted version of PodSpecGen designed to eventually accept options
+// not meant to be in a specgen
+type PodSpec struct {
+	PodSpecGen specgen.PodSpecGenerator
+}
+
+// PodCreateOptions provides all possible options for creating a pod and its infra container.
+// The JSON tags below are made to match the respective field in ContainerCreateOptions for the purpose of mapping.
+// swagger:model PodCreateOptions
+type PodCreateOptions struct {
+	CgroupParent       string            `json:"cgroup_parent,omitempty"`
+	CreateCommand      []string          `json:"create_command,omitempty"`
+	Devices            []string          `json:"devices,omitempty"`
+	DeviceReadBPs      []string          `json:"device_read_bps,omitempty"`
+	Hostname           string            `json:"hostname,omitempty"`
+	Infra              bool              `json:"infra,omitempty"`
+	InfraImage         string            `json:"infra_image,omitempty"`
+	InfraName          string            `json:"container_name,omitempty"`
+	InfraCommand       *string           `json:"container_command,omitempty"`
+	InfraConmonPidFile string            `json:"container_conmon_pidfile,omitempty"`
+	Labels             map[string]string `json:"labels,omitempty"`
+	Name               string            `json:"name,omitempty"`
+	Net                *NetOptions       `json:"net,omitempty"`
+	Share              []string          `json:"share,omitempty"`
+	ShareParent        *bool             `json:"share_parent,omitempty"`
+	Pid                string            `json:"pid,omitempty"`
+	Cpus               float64           `json:"cpus,omitempty"`
+	CpusetCpus         string            `json:"cpuset_cpus,omitempty"`
+	Userns             specgen.Namespace `json:"-"`
+	Volume             []string          `json:"volume,omitempty"`
+	VolumesFrom        []string          `json:"volumes_from,omitempty"`
+	SecurityOpt        []string          `json:"security_opt,omitempty"`
+	Sysctl             []string          `json:"sysctl,omitempty"`
+}
+
+// PodLogsOptions describes the options to extract pod logs.
+type PodLogsOptions struct {
+	// Other fields are exactly the same as ContainerLogsOptions
+	ContainerLogsOptions
+	// If specified, only fetch the logs of the specified container
+	ContainerName string
+	// Show different colors in the logs.
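+	// (presumably wired to the pod logs --color CLI flag)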
+	Color bool
+}
+
+type ContainerCreateOptions struct {
+	Annotation        []string
+	Attach            []string
+	Authfile          string
+	BlkIOWeight       string
+	BlkIOWeightDevice []string
+	CapAdd            []string
+	CapDrop           []string
+	CgroupNS          string
+	CgroupsMode       string
+	CgroupParent      string `json:"cgroup_parent,omitempty"`
+	CIDFile           string
+	ConmonPIDFile     string `json:"container_conmon_pidfile,omitempty"`
+	CPUPeriod         uint64
+	CPUQuota          int64
+	CPURTPeriod       uint64
+	CPURTRuntime      int64
+	CPUShares         uint64
+	CPUS              float64 `json:"cpus,omitempty"`
+	CPUSetCPUs        string  `json:"cpuset_cpus,omitempty"`
+	CPUSetMems        string
+	Devices           []string `json:"devices,omitempty"`
+	DeviceCgroupRule  []string
+	DeviceReadBPs     []string `json:"device_read_bps,omitempty"`
+	DeviceReadIOPs    []string
+	DeviceWriteBPs    []string
+	DeviceWriteIOPs   []string
+	Entrypoint        *string `json:"container_command,omitempty"`
+	Env               []string
+	EnvHost           bool
+	EnvFile           []string
+	Expose            []string
+	GIDMap            []string
+	GroupAdd          []string
+	HealthCmd         string
+	HealthInterval    string
+	HealthRetries     uint
+	HealthStartPeriod string
+	HealthTimeout     string
+	Hostname          string `json:"hostname,omitempty"`
+	HTTPProxy         bool
+	HostUsers         []string
+	ImageVolume       string
+	Init              bool
+	InitContainerType string
+	InitPath          string
+	Interactive       bool
+	IPC               string
+	Label             []string
+	LabelFile         []string
+	LogDriver         string
+	LogOptions        []string
+	Memory            string
+	MemoryReservation string
+	MemorySwap        string
+	MemorySwappiness  int64
+	Name              string `json:"container_name"`
+	NoHealthCheck     bool
+	OOMKillDisable    bool
+	OOMScoreAdj       *int
+	Arch              string
+	OS                string
+	Variant           string
+	PID               string `json:"pid,omitempty"`
+	PIDsLimit         *int64
+	Platform          string
+	Pod               string
+	PodIDFile         string
+	Personality       string
+	PreserveFDs       uint
+	Privileged        bool
+	PublishAll        bool
+	Pull              string
+	Quiet             bool
+	ReadOnly          bool
+	ReadOnlyTmpFS     bool
+	Restart           string
+	Replace           bool
+	Requires          []string
+	Rm                bool
+	RootFS            bool
+	Secrets           []string
+	SecurityOpt       []string `json:"security_opt,omitempty"`
+	SdNotifyMode      string
+	ShmSize           string
+	SignaturePolicy   string
+	StopSignal        string
+	StopTimeout       uint
+	StorageOpts       []string
+	SubUIDName        string
+	SubGIDName        string
+	Sysctl            []string `json:"sysctl,omitempty"`
+	Systemd           string
+	Timeout           uint
+	TLSVerify         commonFlag.OptionalBool
+	TmpFS             []string
+	TTY               bool
+	Timezone          string
+	Umask             string
+	UnsetEnv          []string
+	UnsetEnvAll       bool
+	UIDMap            []string
+	Ulimit            []string
+	User              string
+	UserNS            string `json:"-"`
+	UTS               string
+	Mount             []string
+	Volume            []string `json:"volume,omitempty"`
+	VolumesFrom       []string `json:"volumes_from,omitempty"`
+	Workdir           string
+	SeccompPolicy     string
+	PidFile           string
+	ChrootDirs        []string
+	IsInfra           bool
+	IsClone           bool
+
+	Net *NetOptions `json:"net,omitempty"`
+
+	CgroupConf []string
+
+	PasswdEntry string
+}
+
+func NewInfraContainerCreateOptions() ContainerCreateOptions {
+	options := ContainerCreateOptions{
+		IsInfra:          true,
+		ImageVolume:      "bind",
+		MemorySwappiness: -1,
+	}
+	return options
+}
+
+type PodCreateReport struct {
+	Id string // nolint
+}
+
+func (p *PodCreateOptions) CPULimits() *specs.LinuxCPU {
+	cpu := &specs.LinuxCPU{}
+	hasLimits := false
+
+	if p.Cpus != 0 {
+		period, quota := util.CoresToPeriodAndQuota(p.Cpus)
+		cpu.Period = &period
+		cpu.Quota = &quota
+		hasLimits = true
+	}
+	if p.CpusetCpus != "" {
+		cpu.Cpus = p.CpusetCpus
+		hasLimits = true
+	}
+	if !hasLimits {
+		return nil
+	}
+	return cpu
+}
+
+func ToPodSpecGen(s specgen.PodSpecGenerator, p *PodCreateOptions) (*specgen.PodSpecGenerator, error) {
+	// Basic Config
+	s.Name = p.Name
+	s.InfraName = p.InfraName
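+	// p.Pid carries the raw --pid flag value (for instance "host" or
+	// "private"; illustrative values) which ParseNamespace turns into a
+	// typed namespace.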
out, err := specgen.ParseNamespace(p.Pid) + if err != nil { + return nil, err + } + s.Pid = out + s.Hostname = p.Hostname + s.Labels = p.Labels + s.Devices = p.Devices + s.SecurityOpt = p.SecurityOpt + s.NoInfra = !p.Infra + if p.InfraCommand != nil && len(*p.InfraCommand) > 0 { + s.InfraCommand = strings.Split(*p.InfraCommand, " ") + } + if len(p.InfraConmonPidFile) > 0 { + s.InfraConmonPidFile = p.InfraConmonPidFile + } + s.InfraImage = p.InfraImage + s.SharedNamespaces = p.Share + s.ShareParent = p.ShareParent + s.PodCreateCommand = p.CreateCommand + s.VolumesFrom = p.VolumesFrom + + // Networking config + + if p.Net != nil { + s.NetNS = p.Net.Network + s.PortMappings = p.Net.PublishPorts + s.Networks = p.Net.Networks + s.NetworkOptions = p.Net.NetworkOptions + if p.Net.UseImageResolvConf { + s.NoManageResolvConf = true + } + s.DNSServer = p.Net.DNSServers + s.DNSSearch = p.Net.DNSSearch + s.DNSOption = p.Net.DNSOptions + s.NoManageHosts = p.Net.NoHosts + s.HostAdd = p.Net.AddHosts + } + + // Cgroup + s.CgroupParent = p.CgroupParent + + // Resource config + cpuDat := p.CPULimits() + if s.ResourceLimits == nil { + s.ResourceLimits = &specs.LinuxResources{} + s.ResourceLimits.CPU = &specs.LinuxCPU{} + } + if cpuDat != nil { + s.ResourceLimits.CPU = cpuDat + if p.Cpus != 0 { + s.CPUPeriod = *cpuDat.Period + s.CPUQuota = *cpuDat.Quota + } + } + s.Userns = p.Userns + sysctl := map[string]string{} + if ctl := p.Sysctl; len(ctl) > 0 { + sysctl, err = util.ValidateSysctls(ctl) + if err != nil { + return nil, err + } + } + s.Sysctl = sysctl + + return &s, nil +} + +type PodPruneOptions struct { + Force bool `json:"force" schema:"force"` +} + +type PodPruneReport struct { + Err error + Id string // nolint +} + +type PodTopOptions struct { + // CLI flags. + ListDescriptors bool + Latest bool + + // Options for the API. + Descriptors []string + NameOrID string +} + +type PodPSOptions struct { + CtrNames bool + CtrIds bool + CtrStatus bool + Filters map[string][]string + Format string + Latest bool + Namespace bool + Quiet bool + Sort string +} + +type PodInspectOptions struct { + Latest bool + + // Options for the API. + NameOrID string + + Format string +} + +type PodInspectReport struct { + *define.InspectPodData +} + +// PodStatsOptions are options for the pod stats command. +type PodStatsOptions struct { + // All - provide stats for all running pods. + All bool + // Latest - provide stats for the latest pod. + Latest bool +} + +// PodStatsReport includes pod-resource statistics data. +type PodStatsReport struct { + // Percentage of CPU utilized by pod + // example: 75.5% + CPU string + // Humanized Memory usage and maximum + // example: 12mb / 24mb + MemUsage string + // Memory usage and maximum in bytes + // example: 1,000,000 / 4,000,000 + MemUsageBytes string + // Percentage of Memory utilized by pod + // example: 50.5% + Mem string + // Network usage inbound + outbound + NetIO string + // Humanized disk usage read + write + BlockIO string + // Container PID + PIDS string + // Pod ID + // example: 62310217a19e + Pod string + // Container ID + // example: e43534f89a7d + CID string + // Pod Name + // example: elastic_pascal + Name string +} + +// ValidatePodStatsOptions validates the specified slice and options. Allows +// for sharing code in the front- and the back-end. 
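+// Illustrative behaviour, per the implementation below: no arguments and no
+// flags falls back to All=true (Podman v1 compatibility), while combining any
+// two of --all, --latest, or a pod argument is rejected.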
+func ValidatePodStatsOptions(args []string, options *PodStatsOptions) error { + num := 0 + if len(args) > 0 { + num++ + } + if options.All { + num++ + } + if options.Latest { + num++ + } + switch num { + case 0: + // Podman v1 compat: if nothing's specified get all running + // pods. + options.All = true + return nil + case 1: + return nil + default: + return errors.New("--all, --latest and arguments cannot be used together") + } +} + +// PodLogsOptionsToContainerLogsOptions converts PodLogOptions to ContainerLogOptions +func PodLogsOptionsToContainerLogsOptions(options PodLogsOptions) ContainerLogsOptions { + // PodLogsOptions are similar but contains few extra fields like ctrName + // So cast other values as is so we can re-use the code + containerLogsOpts := ContainerLogsOptions{ + Details: options.Details, + Latest: options.Latest, + Follow: options.Follow, + Names: options.Names, + Since: options.Since, + Until: options.Until, + Tail: options.Tail, + Timestamps: options.Timestamps, + Colors: options.Colors, + StdoutWriter: options.StdoutWriter, + StderrWriter: options.StderrWriter, + } + return containerLogsOpts +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/containers.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/containers.go new file mode 100644 index 00000000000..54bcd092baf --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/containers.go @@ -0,0 +1,28 @@ +package reports + +type RmReport struct { + Id string `json:"Id"` //nolint + Err error `json:"Err,omitempty"` +} + +func RmReportsIds(r []*RmReport) []string { + ids := make([]string, 0, len(r)) + for _, v := range r { + if v == nil || v.Id == "" { + continue + } + ids = append(ids, v.Id) + } + return ids +} + +func RmReportsErrs(r []*RmReport) []error { + errs := make([]error, 0, len(r)) + for _, v := range r { + if v == nil || v.Err == nil { + continue + } + errs = append(errs, v.Err) + } + return errs +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/prune.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/prune.go new file mode 100644 index 00000000000..497e5d6069d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/prune.go @@ -0,0 +1,40 @@ +package reports + +type PruneReport struct { + Id string `json:"Id"` //nolint + Err error `json:"Err,omitempty"` + Size uint64 `json:"Size"` +} + +func PruneReportsIds(r []*PruneReport) []string { + ids := make([]string, 0, len(r)) + for _, v := range r { + if v == nil || v.Id == "" { + continue + } + ids = append(ids, v.Id) + } + return ids +} + +func PruneReportsErrs(r []*PruneReport) []error { + errs := make([]error, 0, len(r)) + for _, v := range r { + if v == nil || v.Err == nil { + continue + } + errs = append(errs, v.Err) + } + return errs +} + +func PruneReportsSize(r []*PruneReport) uint64 { + size := uint64(0) + for _, v := range r { + if v == nil { + continue + } + size += v.Size + } + return size +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go new file mode 100644 index 00000000000..d8af937a721 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go @@ -0,0 +1,128 @@ +package entities + +import ( + "time" + + "github.com/containers/podman/v4/pkg/errorhandling" +) + +type SecretCreateReport struct { + ID string +} + +type SecretCreateOptions struct { 
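+	// Illustrative note: Driver selects the secrets driver backend
+	// (described below as defaulting to "file"), and DriverOpts passes
+	// driver-specific options through to it.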
+ Driver string + DriverOpts map[string]string +} + +type SecretListRequest struct { + Filters map[string][]string +} + +type SecretListReport struct { + ID string + Name string + Driver string + CreatedAt string + UpdatedAt string +} + +type SecretRmOptions struct { + All bool +} + +type SecretRmReport struct { + ID string + Err error +} + +type SecretInfoReport struct { + ID string + CreatedAt time.Time + UpdatedAt time.Time + Spec SecretSpec +} + +type SecretInfoReportCompat struct { + SecretInfoReport + Version SecretVersion +} + +type SecretVersion struct { + Index int +} + +type SecretSpec struct { + Name string + Driver SecretDriverSpec +} + +type SecretDriverSpec struct { + Name string + Options map[string]string +} + +// swagger:model SecretCreate +type SecretCreateRequest struct { + // User-defined name of the secret. + Name string + // Base64-url-safe-encoded (RFC 4648) data to store as secret. + Data string + // Driver represents a driver (default "file") + Driver SecretDriverSpec +} + +// Secret create response +// swagger:response SecretCreateResponse +type SwagSecretCreateResponse struct { + // in:body + Body struct { + SecretCreateReport + } +} + +// Secret list response +// swagger:response SecretListResponse +type SwagSecretListResponse struct { + // in:body + Body []*SecretInfoReport +} + +// Secret list response +// swagger:response SecretListCompatResponse +type SwagSecretListCompatResponse struct { + // in:body + Body []*SecretInfoReportCompat +} + +// Secret inspect response +// swagger:response SecretInspectResponse +type SwagSecretInspectResponse struct { + // in:body + Body SecretInfoReport +} + +// Secret inspect compat +// swagger:response SecretInspectCompatResponse +type SwagSecretInspectCompatResponse struct { + // in:body + Body SecretInfoReportCompat +} + +// No such secret +// swagger:response NoSuchSecret +type SwagErrNoSuchSecret struct { + // in:body + Body struct { + errorhandling.ErrorModel + } +} + +// Secret in use +// swagger:response SecretInUse +type SwagErrSecretInUse struct { + // in:body + Body struct { + errorhandling.ErrorModel + } +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/set.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/set.go new file mode 100644 index 00000000000..1d31d82f989 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/set.go @@ -0,0 +1,45 @@ +package entities + +import ( + "strings" +) + +type StringSet struct { + m map[string]struct{} +} + +func NewStringSet(elem ...string) *StringSet { + s := &StringSet{} + s.m = make(map[string]struct{}, len(elem)) + for _, e := range elem { + s.Add(e) + } + return s +} + +func (s *StringSet) Add(elem string) { + s.m[elem] = struct{}{} +} + +func (s *StringSet) Remove(elem string) { + delete(s.m, elem) +} + +func (s *StringSet) Contains(elem string) bool { + _, ok := s.m[elem] + return ok +} + +func (s *StringSet) Elements() []string { + keys := make([]string, len(s.m)) + i := 0 + for k := range s.m { + keys[i] = k + i++ + } + return keys +} + +func (s *StringSet) String() string { + return strings.Join(s.Elements(), ", ") +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/system.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/system.go new file mode 100644 index 00000000000..21026477d5b --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/system.go @@ -0,0 +1,125 @@ +package entities + +import ( + "time" + + 
"github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/domain/entities/reports" + "github.com/containers/podman/v4/pkg/domain/entities/types" +) + +// ServiceOptions provides the input for starting an API and sidecar pprof services +type ServiceOptions struct { + CorsHeaders string // Cross-Origin Resource Sharing (CORS) headers + PProfAddr string // Network address to bind pprof profiles service + Timeout time.Duration // Duration of inactivity the service should wait before shutting down + URI string // Path to unix domain socket service should listen on +} + +// SystemPruneOptions provides options to prune system. +type SystemPruneOptions struct { + All bool + Volume bool + Filters map[string][]string `json:"filters" schema:"filters"` +} + +// SystemPruneReport provides report after system prune is executed. +type SystemPruneReport struct { + PodPruneReport []*PodPruneReport + ContainerPruneReports []*reports.PruneReport + ImagePruneReports []*reports.PruneReport + VolumePruneReports []*reports.PruneReport + ReclaimedSpace uint64 +} + +// SystemMigrateOptions describes the options needed for the +// cli to migrate runtimes of containers +type SystemMigrateOptions struct { + NewRuntime string +} + +// SystemDfOptions describes the options for getting df information +type SystemDfOptions struct { + Format string + Verbose bool +} + +// SystemDfReport describes the response for df information +type SystemDfReport struct { + Images []*SystemDfImageReport + Containers []*SystemDfContainerReport + Volumes []*SystemDfVolumeReport +} + +// SystemDfImageReport describes an image for use with df +type SystemDfImageReport struct { + Repository string + Tag string + ImageID string + Created time.Time + Size int64 + SharedSize int64 + UniqueSize int64 + Containers int +} + +// SystemDfContainerReport describes a container for use with df +type SystemDfContainerReport struct { + ContainerID string + Image string + Command []string + LocalVolumes int + Size int64 + RWSize int64 + Created time.Time + Status string + Names string +} + +// SystemDfVolumeReport describes a volume and its size +type SystemDfVolumeReport struct { + VolumeName string + Links int + Size int64 + ReclaimableSize int64 +} + +// SystemResetOptions describes the options for resetting your +// container runtime storage, etc +type SystemResetOptions struct { + Force bool +} + +// SystemVersionReport describes version information about the running Podman service +type SystemVersionReport struct { + // Always populated + Client *define.Version `json:",omitempty"` + // May be populated, when in tunnel mode + Server *define.Version `json:",omitempty"` +} + +// SystemUnshareOptions describes the options for the unshare command +type SystemUnshareOptions struct { + RootlessNetNS bool +} + +type ComponentVersion struct { + types.Version +} + +// ListRegistriesReport is the report when querying for a sorted list of +// registries which may be contacted during certain operations. 
+type ListRegistriesReport struct { + Registries []string +} + +// swagger:model AuthConfig +type AuthConfig struct { + types.AuthConfig +} + +// AuthReport describes the response for authentication check +type AuthReport struct { + IdentityToken string + Status string +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go new file mode 100644 index 00000000000..5ae8a49315f --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go @@ -0,0 +1,128 @@ +package entities + +import ( + "net" + + buildahDefine "github.com/containers/buildah/define" + "github.com/containers/common/libnetwork/types" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/storage/pkg/archive" + dockerAPI "github.com/docker/docker/api/types" +) + +type Container struct { + IDOrNamed +} + +type Volume struct { + Identifier +} + +type Report struct { + Id []string // nolint + Err map[string]error +} + +type PodDeleteReport struct{ Report } + +type ( + VolumeDeleteOptions struct{} + VolumeDeleteReport struct{ Report } +) + +type NetFlags struct { + AddHosts []string `json:"add-host,omitempty"` + DNS []string `json:"dns,omitempty"` + DNSOpt []string `json:"dns-opt,omitempty"` + DNDSearch []string `json:"dns-search,omitempty"` + MacAddr string `json:"mac-address,omitempty"` + Publish []string `json:"publish,omitempty"` + IP string `json:"ip,omitempty"` + NoHosts bool `json:"no-hosts,omitempty"` + Network string `json:"network,omitempty"` + NetworkAlias []string `json:"network-alias,omitempty"` +} + +// NetOptions reflect the shared network options between +// pods and containers +type NetOptions struct { + AddHosts []string `json:"hostadd,omitempty"` + Aliases []string `json:"network_alias,omitempty"` + Networks map[string]types.PerNetworkOptions `json:"networks,omitempty"` + UseImageResolvConf bool `json:"no_manage_resolv_conf,omitempty"` + DNSOptions []string `json:"dns_option,omitempty"` + DNSSearch []string `json:"dns_search,omitempty"` + DNSServers []net.IP `json:"dns_server,omitempty"` + Network specgen.Namespace `json:"netns,omitempty"` + NoHosts bool `json:"no_manage_hosts,omitempty"` + PublishPorts []types.PortMapping `json:"portmappings,omitempty"` + // NetworkOptions are additional options for each network + NetworkOptions map[string][]string `json:"network_options,omitempty"` +} + +// InspectOptions all CLI inspect commands and inspect sub-commands use the same options +type InspectOptions struct { + // Format - change the output to JSON or a Go template. + Format string `json:",omitempty"` + // Latest - inspect the latest container Podman is aware of. + Latest bool `json:",omitempty"` + // Size (containers only) - display total file size. + Size bool `json:",omitempty"` + // Type -- return JSON for specified type. 
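+	// (illustrative values: "container" or "image", as with inspect --type)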
+ Type string `json:",omitempty"` + // All -- inspect all + All bool `json:",omitempty"` +} + +// DiffOptions all API and CLI diff commands and diff sub-commands use the same options +type DiffOptions struct { + Format string `json:",omitempty"` // CLI only + Latest bool `json:",omitempty"` // API and CLI, only supported by containers + Archive bool `json:",omitempty"` // CLI only + Type define.DiffType // Type which should be compared +} + +// DiffReport provides changes for object +type DiffReport struct { + Changes []archive.Change +} + +type EventsOptions struct { + FromStart bool + EventChan chan *events.Event + Filter []string + Stream bool + Since string + Until string +} + +// ContainerCreateResponse is the response struct for creating a container +type ContainerCreateResponse struct { + // ID of the container created + // required: true + ID string `json:"Id"` + // Warnings during container creation + // required: true + Warnings []string `json:"Warnings"` +} + +// BuildOptions describe the options for building container images. +type BuildOptions struct { + buildahDefine.BuildOptions +} + +// BuildReport is the image-build report. +type BuildReport struct { + // ID of the image. + ID string +} + +type IDOrNameResponse struct { + // The Id or Name of an object + IDOrName string +} + +// swagger:model +type IDResponse dockerAPI.IDResponse diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/types/auth.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types/auth.go new file mode 100644 index 00000000000..7f2480173f8 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types/auth.go @@ -0,0 +1,23 @@ +// copied from github.com/docker/docker/api/types +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/types/types.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types/types.go new file mode 100644 index 00000000000..7dc785078bc --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types/types.go @@ -0,0 +1,29 @@ +// copied from github.com/docker/docker/api/types +package types + +// ComponentVersion describes the version information for a specific component. 
+type ComponentVersion struct { + Name string + Version string + Details map[string]string `json:",omitempty"` +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Platform struct{ Name string } `json:",omitempty"` + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility + + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go new file mode 100644 index 00000000000..84f85b83f47 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go @@ -0,0 +1,73 @@ +package entities + +import ( + "net/url" + + "github.com/containers/podman/v4/libpod/define" +) + +// VolumeCreateOptions provides details for creating volumes +// swagger:model +type VolumeCreateOptions struct { + // New volume's name. Can be left blank + Name string `schema:"name"` + // Volume driver to use + Driver string `schema:"driver"` + // User-defined key/value metadata. Provided for compatibility + Label map[string]string `schema:"label"` + // User-defined key/value metadata. Preferred field, will override Label + Labels map[string]string `schema:"labels"` + // Mapping of driver options and values. + Options map[string]string `schema:"opts"` +} + +type VolumeConfigResponse struct { + define.InspectVolumeData +} + +type VolumeRmOptions struct { + All bool + Force bool + Timeout *uint +} + +type VolumeRmReport struct { + Err error + Id string // nolint +} + +type VolumeInspectReport struct { + *VolumeConfigResponse +} + +// VolumePruneOptions describes the options needed +// to prune a volume from the CLI +type VolumePruneOptions struct { + Filters url.Values `json:"filters" schema:"filters"` +} + +type VolumeListOptions struct { + Filter map[string][]string +} + +type VolumeListReport struct { + VolumeConfigResponse +} + +/* + * Docker API compatibility types + */ + +// VolumeMountReport describes the response from volume mount +type VolumeMountReport struct { + Err error + Id string // nolint + Name string + Path string +} + +// VolumeUnmountReport describes the response from umounting a volume +type VolumeUnmountReport struct { + Err error + Id string // nolint +} diff --git a/vendor/github.com/containers/podman/v4/pkg/errorhandling/errorhandling.go b/vendor/github.com/containers/podman/v4/pkg/errorhandling/errorhandling.go new file mode 100644 index 00000000000..6ee1e7e86c4 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/errorhandling/errorhandling.go @@ -0,0 +1,123 @@ +package errorhandling + +import ( + "os" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// JoinErrors converts the error slice into a single human-readable error. +func JoinErrors(errs []error) error { + if len(errs) == 0 { + return nil + } + + // If there's just one error, return it. This prevents the "%d errors + // occurred:" header plus list from the multierror package. 
+	if len(errs) == 1 {
+		return errs[0]
+	}
+
+	// `multierror` appends new lines which we need to remove to prevent
+	// blank lines when printing the error.
+	var multiE *multierror.Error
+	multiE = multierror.Append(multiE, errs...)
+
+	finalErr := multiE.ErrorOrNil()
+	if finalErr == nil {
+		return nil
+	}
+	return errors.New(strings.TrimSpace(finalErr.Error()))
+}
+
+// ErrorsToStrings converts the slice of errors into a slice of corresponding
+// error messages.
+func ErrorsToStrings(errs []error) []string {
+	if len(errs) == 0 {
+		return nil
+	}
+	strErrs := make([]string, len(errs))
+	for i := range errs {
+		strErrs[i] = errs[i].Error()
+	}
+	return strErrs
+}
+
+// StringsToErrors converts a slice of error messages into a slice of
+// corresponding errors.
+func StringsToErrors(strErrs []string) []error {
+	if len(strErrs) == 0 {
+		return nil
+	}
+	errs := make([]error, len(strErrs))
+	for i := range strErrs {
+		errs[i] = errors.New(strErrs[i])
+	}
+	return errs
+}
+
+// SyncQuiet syncs a file and logs any error. Should only be used within
+// a defer.
+func SyncQuiet(f *os.File) {
+	if err := f.Sync(); err != nil {
+		logrus.Errorf("Unable to sync file %s: %q", f.Name(), err)
+	}
+}
+
+// CloseQuiet closes a file and logs any error. Should only be used within
+// a defer.
+func CloseQuiet(f *os.File) {
+	if err := f.Close(); err != nil {
+		logrus.Errorf("Unable to close file %s: %q", f.Name(), err)
+	}
+}
+
+// Contains checks if err's message contains sub's message. Contains should be
+// used iff either err or sub has lost type information (e.g., due to
+// marshaling). For typed errors, please use `errors.Is()` from recent
+// versions of Go.
+func Contains(err error, sub error) bool {
+	return strings.Contains(err.Error(), sub.Error())
+}
+
+// PodConflictErrorModel is used in remote connections with podman
+type PodConflictErrorModel struct {
+	Errs []string
+	Id   string // nolint
+}
+
+// ErrorModel is used in remote connections with podman
+type ErrorModel struct {
+	// API root cause formatted for automated parsing
+	// example: API root cause
+	Because string `json:"cause"`
+	// human error message, formatted for a human to read
+	// example: human error message
+	Message string `json:"message"`
+	// HTTP response code
+	// min: 400
+	ResponseCode int `json:"response"`
+}
+
+func (e ErrorModel) Error() string {
+	return e.Message
+}
+
+func (e ErrorModel) Cause() error {
+	return errors.New(e.Because)
+}
+
+func (e ErrorModel) Code() int {
+	return e.ResponseCode
+}
+
+func (e PodConflictErrorModel) Error() string {
+	return strings.Join(e.Errs, ",")
+}
+
+func (e PodConflictErrorModel) Code() int {
+	return 409
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/inspect/inspect.go b/vendor/github.com/containers/podman/v4/pkg/inspect/inspect.go
new file mode 100644
index 00000000000..767d86daf59
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/inspect/inspect.go
@@ -0,0 +1,58 @@
+package inspect
+
+import (
+	"time"
+
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/podman/v4/libpod/define"
+	"github.com/opencontainers/go-digest"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageData holds the inspect information of an image.
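+// The json tags below mirror the keys of the JSON printed by
+// `podman image inspect`.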
+type ImageData struct { + ID string `json:"Id"` + Digest digest.Digest `json:"Digest"` + RepoTags []string `json:"RepoTags"` + RepoDigests []string `json:"RepoDigests"` + Parent string `json:"Parent"` + Comment string `json:"Comment"` + Created *time.Time `json:"Created"` + Config *v1.ImageConfig `json:"Config"` + Version string `json:"Version"` + Author string `json:"Author"` + Architecture string `json:"Architecture"` + Os string `json:"Os"` + Size int64 `json:"Size"` + VirtualSize int64 `json:"VirtualSize"` + GraphDriver *define.DriverData `json:"GraphDriver"` + RootFS *RootFS `json:"RootFS"` + Labels map[string]string `json:"Labels"` + Annotations map[string]string `json:"Annotations"` + ManifestType string `json:"ManifestType"` + User string `json:"User"` + History []v1.History `json:"History"` + NamesHistory []string `json:"NamesHistory"` + HealthCheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"` +} + +// RootFS holds the root fs information of an image. +type RootFS struct { + Type string `json:"Type"` + Layers []digest.Digest `json:"Layers"` +} + +// ImageResult is used for podman images for collection and output. +type ImageResult struct { + Tag string + Repository string + RepoDigests []string + RepoTags []string + ID string + Digest digest.Digest + ConfigDigest digest.Digest + Created time.Time + Size *uint64 + Labels map[string]string + Dangling bool +} diff --git a/vendor/github.com/containers/podman/v4/pkg/namespaces/namespaces.go b/vendor/github.com/containers/podman/v4/pkg/namespaces/namespaces.go new file mode 100644 index 00000000000..c95f8e27547 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/namespaces/namespaces.go @@ -0,0 +1,418 @@ +package namespaces + +import ( + "fmt" + "strconv" + "strings" + + "github.com/containers/storage/types" +) + +const ( + bridgeType = "bridge" + containerType = "container" + defaultType = "default" + hostType = "host" + noneType = "none" + nsType = "ns" + podType = "pod" + privateType = "private" + shareableType = "shareable" + slirpType = "slirp4netns" +) + +// CgroupMode represents cgroup mode in the container. +type CgroupMode string + +// IsHost indicates whether the container uses the host's cgroup. +func (n CgroupMode) IsHost() bool { + return n == hostType +} + +// IsDefaultValue indicates whether the cgroup namespace has the default value. +func (n CgroupMode) IsDefaultValue() bool { + return n == "" || n == defaultType +} + +// IsNS indicates a cgroup namespace passed in by path (ns:) +func (n CgroupMode) IsNS() bool { + return strings.HasPrefix(string(n), nsType) +} + +// NS gets the path associated with a ns: cgroup ns +func (n CgroupMode) NS() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// IsContainer indicates whether the container uses a new cgroup namespace. +func (n CgroupMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == containerType +} + +// Container returns the name of the container whose cgroup namespace is going to be used. +func (n CgroupMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 && parts[0] == containerType { + return parts[1] + } + return "" +} + +// IsPrivate indicates whether the container uses the a private cgroup. +func (n CgroupMode) IsPrivate() bool { + return n == privateType +} + +// Valid indicates whether the Cgroup namespace is valid. 
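+// Accepted forms, per the switch below: "", "host", "private", "ns:<path>"
+// and "container:<name|id>"; anything else is invalid.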
+func (n CgroupMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", hostType, privateType, nsType: + case containerType: + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// UsernsMode represents userns mode in the container. +type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == hostType +} + +// IsKeepID indicates whether container uses a mapping where the (uid, gid) on the host is kept inside of the namespace. +func (n UsernsMode) IsKeepID() bool { + return n == "keep-id" +} + +// IsNoMap indicates whether container uses a mapping where the (uid, gid) on the host is not present in the namespace. +func (n UsernsMode) IsNoMap() bool { + return n == "nomap" +} + +// IsAuto indicates whether container uses the "auto" userns mode. +func (n UsernsMode) IsAuto() bool { + parts := strings.Split(string(n), ":") + return parts[0] == "auto" +} + +// IsDefaultValue indicates whether the user namespace has the default value. +func (n UsernsMode) IsDefaultValue() bool { + return n == "" || n == defaultType +} + +// GetAutoOptions returns a AutoUserNsOptions with the settings to setup automatically +// a user namespace. +func (n UsernsMode) GetAutoOptions() (*types.AutoUserNsOptions, error) { + parts := strings.SplitN(string(n), ":", 2) + if parts[0] != "auto" { + return nil, fmt.Errorf("wrong user namespace mode") + } + options := types.AutoUserNsOptions{} + if len(parts) == 1 { + return &options, nil + } + for _, o := range strings.Split(parts[1], ",") { + v := strings.SplitN(o, "=", 2) + if len(v) != 2 { + return nil, fmt.Errorf("invalid option specified: %q", o) + } + switch v[0] { + case "size": + s, err := strconv.ParseUint(v[1], 10, 32) + if err != nil { + return nil, err + } + options.Size = uint32(s) + case "uidmapping": + mapping, err := types.ParseIDMapping([]string{v[1]}, nil, "", "") + if err != nil { + return nil, err + } + options.AdditionalUIDMappings = append(options.AdditionalUIDMappings, mapping.UIDMap...) + case "gidmapping": + mapping, err := types.ParseIDMapping(nil, []string{v[1]}, "", "") + if err != nil { + return nil, err + } + options.AdditionalGIDMappings = append(options.AdditionalGIDMappings, mapping.GIDMap...) + default: + return nil, fmt.Errorf("unknown option specified: %q", v[0]) + } + } + return &options, nil +} + +// IsPrivate indicates whether the container uses the a private userns. +func (n UsernsMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", privateType, hostType, "keep-id", nsType, "auto", "nomap": + case containerType: + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// IsNS indicates a userns namespace passed in by path (ns:) +func (n UsernsMode) IsNS() bool { + return strings.HasPrefix(string(n), "ns:") +} + +// NS gets the path associated with a ns: userns ns +func (n UsernsMode) NS() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// IsContainer indicates whether container uses a container userns. 
+func (n UsernsMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == containerType +} + +// Container is the id of the container which network this container is connected to. +func (n UsernsMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 && parts[0] == containerType { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == hostType +} + +// IsContainer indicates whether the container uses a container's UTS namespace. +func (n UTSMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == containerType +} + +// Container returns the name of the container whose uts namespace is going to be used. +func (n UTSMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 && parts[0] == containerType { + return parts[1] + } + return "" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", privateType, hostType: + case containerType: + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IsPrivate indicates whether the container uses its own private ipc namespace which cannot be shared. +func (n IpcMode) IsPrivate() bool { + return n == privateType +} + +// IsHost indicates whether the container shares the host's ipc namespace. +func (n IpcMode) IsHost() bool { + return n == hostType +} + +// IsShareable indicates whether the container uses its own shareable ipc namespace which can be shared. +func (n IpcMode) IsShareable() bool { + return n == shareableType +} + +// IsContainer indicates whether the container uses another container's ipc namespace. +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == containerType +} + +// IsNone indicates whether container IpcMode is set to "none". +func (n IpcMode) IsNone() bool { + return n == noneType +} + +// IsEmpty indicates whether container IpcMode is empty +func (n IpcMode) IsEmpty() bool { + return n == "" +} + +// Valid indicates whether the ipc mode is valid. +func (n IpcMode) Valid() bool { + return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() +} + +// Container returns the name of the container ipc stack is going to be used. +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 && parts[0] == containerType { + return parts[1] + } + return "" +} + +// PidMode represents the pid namespace of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace. +func (n PidMode) IsHost() bool { + return n == hostType +} + +// IsContainer indicates whether the container uses a container's pid namespace. 
+func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == containerType +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", privateType, hostType: + case containerType: + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 && parts[0] == containerType { + return parts[1] + } + return "" +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == noneType +} + +// IsHost indicates whether the container uses the host's network stack. +func (n NetworkMode) IsHost() bool { + return n == hostType +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == defaultType +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == containerType +} + +// Container is the id of the container which network this container is connected to. +func (n NetworkMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 && parts[0] == containerType { + return parts[1] + } + return "" +} + +// UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// IsBridge indicates whether container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == bridgeType +} + +// IsSlirp4netns indicates if we are running a rootless network stack +func (n NetworkMode) IsSlirp4netns() bool { + return n == slirpType || strings.HasPrefix(string(n), slirpType+":") +} + +// IsNS indicates a network namespace passed in by path (ns:) +func (n NetworkMode) IsNS() bool { + return strings.HasPrefix(string(n), nsType) +} + +// NS gets the path associated with a ns: network ns +func (n NetworkMode) NS() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// IsPod returns whether the network refers to pod networking +func (n NetworkMode) IsPod() bool { + return n == podType +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() && !n.IsSlirp4netns() && !n.IsNS() +} diff --git a/vendor/github.com/containers/podman/v4/pkg/ps/define/types.go b/vendor/github.com/containers/podman/v4/pkg/ps/define/types.go new file mode 100644 index 00000000000..878653c3a35 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/ps/define/types.go @@ -0,0 +1,8 @@ +package define + +// ContainerSize holds the size of the container's root filesystem and top +// read-write layer. 
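+// Illustrative JSON shape, per the tags below: {"rootFsSize": 123456, "rwSize": 789}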
+type ContainerSize struct { + RootFsSize int64 `json:"rootFsSize"` + RwSize int64 `json:"rwSize"` +} diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless.go b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless.go new file mode 100644 index 00000000000..d7143f54919 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless.go @@ -0,0 +1,206 @@ +package rootless + +import ( + "errors" + "fmt" + "os" + "sort" + "sync" + + "github.com/containers/storage/pkg/lockfile" + "github.com/opencontainers/runc/libcontainer/user" + spec "github.com/opencontainers/runtime-spec/specs-go" +) + +// TryJoinPauseProcess attempts to join the namespaces of the pause PID via +// TryJoinFromFilePaths. If joining fails, it attempts to delete the specified +// file. +func TryJoinPauseProcess(pausePidPath string) (bool, int, error) { + if _, err := os.Stat(pausePidPath); err != nil { + if errors.Is(err, os.ErrNotExist) { + return false, -1, nil + } + return false, -1, err + } + + became, ret, err := TryJoinFromFilePaths("", false, []string{pausePidPath}) + if err == nil { + return became, ret, nil + } + + // It could not join the pause process, let's lock the file before trying to delete it. + pidFileLock, err := lockfile.GetLockfile(pausePidPath) + if err != nil { + // The file was deleted by another process. + if os.IsNotExist(err) { + return false, -1, nil + } + return false, -1, fmt.Errorf("acquiring lock on %s: %w", pausePidPath, err) + } + + pidFileLock.Lock() + defer func() { + if pidFileLock.Locked() { + pidFileLock.Unlock() + } + }() + + // Now the pause PID file is locked. Try to join once again in case it changed while it was not locked. + became, ret, err = TryJoinFromFilePaths("", false, []string{pausePidPath}) + if err != nil { + // It is still failing. We can safely remove it. + os.Remove(pausePidPath) + return false, -1, nil // nolint: nilerr + } + return became, ret, err +} + +var ( + uidMap []user.IDMap + uidMapError error + uidMapOnce sync.Once + + gidMap []user.IDMap + gidMapError error + gidMapOnce sync.Once +) + +// GetAvailableUIDMap returns the UID mappings in the +// current user namespace. +func GetAvailableUIDMap() ([]user.IDMap, error) { + uidMapOnce.Do(func() { + var err error + uidMap, err = user.ParseIDMapFile("/proc/self/uid_map") + if err != nil { + uidMapError = err + return + } + }) + return uidMap, uidMapError +} + +// GetAvailableGIDMap returns the GID mappings in the +// current user namespace. +func GetAvailableGIDMap() ([]user.IDMap, error) { + gidMapOnce.Do(func() { + var err error + gidMap, err = user.ParseIDMapFile("/proc/self/gid_map") + if err != nil { + gidMapError = err + return + } + }) + return gidMap, gidMapError +} + +// GetAvailableIDMaps returns the UID and GID mappings in the +// current user namespace. +func GetAvailableIDMaps() ([]user.IDMap, []user.IDMap, error) { + u, err := GetAvailableUIDMap() + if err != nil { + return nil, nil, err + } + g, err := GetAvailableGIDMap() + if err != nil { + return nil, nil, err + } + return u, g, nil +} + +func countAvailableIDs(mappings []user.IDMap) int64 { + availableUids := int64(0) + for _, r := range mappings { + availableUids += r.Count + } + return availableUids +} + +// GetAvailableUids returns how many UIDs are available in the +// current user namespace. 
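+// Illustrative example: for a rootless user whose /proc/self/uid_map reads
+//
+//	0       1000        1
+//	1     100000    65536
+//
+// the two ranges (lengths 1 and 65536) sum to 65537 available UIDs.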
+func GetAvailableUids() (int64, error) {
+	uids, err := GetAvailableUIDMap()
+	if err != nil {
+		return -1, err
+	}
+
+	return countAvailableIDs(uids), nil
+}
+
+// GetAvailableGids returns how many GIDs are available in the
+// current user namespace.
+func GetAvailableGids() (int64, error) {
+	gids, err := GetAvailableGIDMap()
+	if err != nil {
+		return -1, err
+	}
+
+	return countAvailableIDs(gids), nil
+}
+
+// findIDInMappings finds the mapping that contains the specified ID.
+// It assumes availableMappings is sorted by ID in descending order.
+func findIDInMappings(id int64, availableMappings []user.IDMap) *user.IDMap {
+	i := sort.Search(len(availableMappings), func(i int) bool {
+		return availableMappings[i].ID <= id
+	})
+	if i < 0 || i >= len(availableMappings) {
+		return nil
+	}
+	r := &availableMappings[i]
+	if id >= r.ID && id < r.ID+r.Count {
+		return r
+	}
+	return nil
+}
+
+// MaybeSplitMappings checks whether the specified OCI mappings are possible
+// in the current user namespace, or whether the specified ranges must be split.
+func MaybeSplitMappings(mappings []spec.LinuxIDMapping, availableMappings []user.IDMap) []spec.LinuxIDMapping {
+	var ret []spec.LinuxIDMapping
+	var overflow spec.LinuxIDMapping
+	overflow.Size = 0
+	consumed := 0
+	sort.Slice(availableMappings, func(i, j int) bool {
+		return availableMappings[i].ID > availableMappings[j].ID
+	})
+	for {
+		cur := overflow
+		// if there is no overflow left from the previous request, get the next one
+		if cur.Size == 0 {
+			if consumed == len(mappings) {
+				// all done
+				return ret
+			}
+			cur = mappings[consumed]
+			consumed++
+		}
+
+		// Find the range where the first specified ID is present
+		r := findIDInMappings(int64(cur.HostID), availableMappings)
+		if r == nil {
+			// The requested range is not available. Just return the original request
+			// and let other layers deal with it.
+			return mappings
+		}
+
+		offsetInRange := cur.HostID - uint32(r.ID)
+
+		usableIDs := uint32(r.Count) - offsetInRange
+
+		// the current range can satisfy the whole request
+		if usableIDs >= cur.Size {
+			// reset the overflow
+			overflow.Size = 0
+		} else {
+			// the current range can satisfy the request partially
+			// so move the rest to overflow
+			overflow.Size = cur.Size - usableIDs
+			overflow.ContainerID = cur.ContainerID + usableIDs
+			overflow.HostID = cur.HostID + usableIDs
+
+			// and cap to the usableIDs count
+			cur.Size = usableIDs
+		}
+		ret = append(ret, cur)
+	}
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c
new file mode 100644
index 00000000000..94bd40f8627
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c
@@ -0,0 +1,975 @@
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <limits.h>
+#include <dirent.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/wait.h>
+#include <sys/prctl.h>
+#include <sys/select.h>
+
+#ifndef TEMP_FAILURE_RETRY
+#define TEMP_FAILURE_RETRY(expression) \
+  (__extension__ \
+    ({ long int __result; \
+       do __result = (long int) (expression); \
+       while (__result == -1L && errno == EINTR); \
+       __result; }))
+#endif
+
+#define cleanup_free __attribute__ ((cleanup (cleanup_freep)))
+#define cleanup_close __attribute__ ((cleanup (cleanup_closep)))
+#define cleanup_dir __attribute__ ((cleanup (cleanup_dirp)))
+
+static inline void
+cleanup_freep (void *p)
+{
+  void **pp = (void **) p;
+  free (*pp);
+}
+
+static inline void
+cleanup_closep (void *p)
+{
+  int *pp = p;
+  if (*pp >= 0)
+    TEMP_FAILURE_RETRY (close (*pp));
+}
+
+static inline void
+cleanup_dirp (DIR **p)
+{
+  DIR *dir = *p;
+  if (dir)
+    closedir (dir);
+}
+
+int rename_noreplace (int olddirfd, const char *oldpath, int newdirfd, const char *newpath)
+{
+  int ret;
+
+# ifdef SYS_renameat2
+#  ifndef RENAME_NOREPLACE
+#   define RENAME_NOREPLACE (1 << 0)
+#  endif
+
+  ret = (int) syscall (SYS_renameat2, olddirfd, oldpath, newdirfd, newpath, RENAME_NOREPLACE);
+  if (ret == 0 || errno != EINVAL)
+    return ret;
+
+  /* Fallback in case of errno==EINVAL.  */
+# endif
+
+  /* This might be an issue if another process is trying to read the file while it is empty.  */
+  ret = open (newpath, O_EXCL|O_CREAT, 0700);
+  if (ret < 0)
+    return ret;
+  close (ret);
+
+  /* We are sure we created the file, let's overwrite it.
*/ + return rename (oldpath, newpath); +} + +static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces"; +static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone"; + +static int open_files_max_fd; +static fd_set *open_files_set; +static uid_t rootless_uid_init; +static gid_t rootless_gid_init; +static bool do_socket_activation = false; +static char *saved_systemd_listen_fds; +static char *saved_systemd_listen_pid; +static char *saved_systemd_listen_fdnames; + +static int +syscall_setresuid (uid_t ruid, uid_t euid, uid_t suid) +{ + return (int) syscall (__NR_setresuid, ruid, euid, suid); +} + +static int +syscall_setresgid (gid_t rgid, gid_t egid, gid_t sgid) +{ + return (int) syscall (__NR_setresgid, rgid, egid, sgid); +} + +uid_t +rootless_uid () +{ + return rootless_uid_init; +} + +uid_t +rootless_gid () +{ + return rootless_gid_init; +} + +static void +do_pause () +{ + int i; + struct sigaction act; + int const sig[] = + { + SIGALRM, SIGHUP, SIGINT, SIGPIPE, SIGQUIT, SIGPOLL, + SIGPROF, SIGVTALRM, SIGXCPU, SIGXFSZ, 0 + }; + + act.sa_handler = SIG_IGN; + + for (i = 0; sig[i]; i++) + sigaction (sig[i], &act, NULL); + + /* Attempt to execv catatonit to keep the pause process alive. */ + execl ("/usr/libexec/podman/catatonit", "catatonit", "-P", NULL); + execl ("/usr/bin/catatonit", "catatonit", "-P", NULL); + /* and if the catatonit executable could not be found, fallback here... */ + + prctl (PR_SET_NAME, "podman pause", NULL, NULL, NULL); + while (1) + pause (); +} + +static char ** +get_cmd_line_args () +{ + cleanup_free char *buffer = NULL; + cleanup_close int fd = -1; + size_t allocated; + size_t used = 0; + int ret; + int i, argc = 0; + char **argv; + + fd = open ("/proc/self/cmdline", O_RDONLY); + if (fd < 0) + return NULL; + + allocated = 512; + buffer = malloc (allocated); + if (buffer == NULL) + return NULL; + for (;;) + { + ret = TEMP_FAILURE_RETRY (read (fd, buffer + used, allocated - used)); + if (ret < 0) + return NULL; + + if (ret == 0) + break; + + used += ret; + if (allocated == used) + { + allocated += 512; + char *tmp = realloc (buffer, allocated); + if (tmp == NULL) + return NULL; + buffer = tmp; + } + } + + for (i = 0; i < used; i++) + if (buffer[i] == '\0') + argc++; + if (argc == 0) + return NULL; + + argv = malloc (sizeof (char *) * (argc + 1)); + if (argv == NULL) + return NULL; + + argc = 0; + + argv[argc++] = buffer; + for (i = 0; i < used - 1; i++) + if (buffer[i] == '\0') + argv[argc++] = buffer + i + 1; + + argv[argc] = NULL; + + /* Move ownership. 
*/ + buffer = NULL; + + return argv; +} + +static bool +can_use_shortcut () +{ + cleanup_free char **argv = NULL; + cleanup_free char *argv0 = NULL; + bool ret = true; + int argc; + +#ifdef DISABLE_JOIN_SHORTCUT + return false; +#endif + + argv = get_cmd_line_args (); + if (argv == NULL) + return false; + + argv0 = argv[0]; + + if (strstr (argv[0], "podman") == NULL) + return false; + + for (argc = 0; argv[argc]; argc++) + { + if (argc == 0 || argv[argc][0] == '-') + continue; + + if (strcmp (argv[argc], "mount") == 0 + || strcmp (argv[argc], "machine") == 0 + || strcmp (argv[argc], "search") == 0 + || (strcmp (argv[argc], "system") == 0 && argv[argc+1] && strcmp (argv[argc+1], "service") != 0)) + { + ret = false; + break; + } + + if (argv[argc+1] != NULL && (strcmp (argv[argc], "container") == 0 || + strcmp (argv[argc], "image") == 0) && + (strcmp (argv[argc+1], "mount") == 0 || strcmp (argv[argc+1], "scp") == 0)) + { + ret = false; + break; + } + } + + return ret; +} + +static int +open_namespace (int pid_to_join, const char *ns_file) +{ + char ns_path[PATH_MAX]; + int ret; + + ret = snprintf (ns_path, PATH_MAX, "/proc/%d/ns/%s", pid_to_join, ns_file); + if (ret == PATH_MAX) + { + fprintf (stderr, "internal error: namespace path too long\n"); + return -1; + } + + return open (ns_path, O_CLOEXEC | O_RDONLY); +} + +int +is_fd_inherited(int fd) +{ + if (open_files_set == NULL || fd > open_files_max_fd || fd < 0) + return 0; + + return FD_ISSET(fd % FD_SETSIZE, &(open_files_set[fd / FD_SETSIZE])) ? 1 : 0; +} + +static void __attribute__((constructor)) init() +{ + const char *xdg_runtime_dir; + const char *pause; + const char *listen_pid; + const char *listen_fds; + const char *listen_fdnames; + cleanup_dir DIR *d = NULL; + + pause = getenv ("_PODMAN_PAUSE"); + if (pause && pause[0]) + { + do_pause (); + _exit (EXIT_FAILURE); + } + + /* Store how many FDs were open before the Go runtime kicked in. */ + d = opendir ("/proc/self/fd"); + if (d) + { + struct dirent *ent; + size_t size = 0; + + for (ent = readdir (d); ent; ent = readdir (d)) + { + int fd; + + if (ent->d_name[0] == '.') + continue; + + fd = atoi (ent->d_name); + if (fd == dirfd (d)) + continue; + + if (fd >= size * FD_SETSIZE) + { + int i; + size_t new_size; + + new_size = (fd / FD_SETSIZE) + 1; + open_files_set = realloc (open_files_set, new_size * sizeof (fd_set)); + if (open_files_set == NULL) + _exit (EXIT_FAILURE); + + for (i = size; i < new_size; i++) + FD_ZERO (&(open_files_set[i])); + + size = new_size; + } + + if (fd > open_files_max_fd) + open_files_max_fd = fd; + + FD_SET (fd % FD_SETSIZE, &(open_files_set[fd / FD_SETSIZE])); + } + } + + listen_pid = getenv("LISTEN_PID"); + listen_fds = getenv("LISTEN_FDS"); + listen_fdnames = getenv("LISTEN_FDNAMES"); + + if (listen_pid != NULL && listen_fds != NULL && strtol(listen_pid, NULL, 10) == getpid()) + { + // save systemd socket environment for rootless child + do_socket_activation = true; + saved_systemd_listen_pid = strdup(listen_pid); + saved_systemd_listen_fds = strdup(listen_fds); + if (listen_fdnames != NULL) + saved_systemd_listen_fdnames = strdup(listen_fdnames); + if (saved_systemd_listen_pid == NULL + || saved_systemd_listen_fds == NULL) + { + fprintf (stderr, "save socket listen environments error: %m\n"); + _exit (EXIT_FAILURE); + } + } + + /* Shortcut. If we are able to join the pause pid file, do it now so we don't + need to re-exec. 
*/ + xdg_runtime_dir = getenv ("XDG_RUNTIME_DIR"); + if (geteuid () != 0 && xdg_runtime_dir && xdg_runtime_dir[0] && can_use_shortcut ()) + { + cleanup_free char *cwd = NULL; + cleanup_close int userns_fd = -1; + cleanup_close int mntns_fd = -1; + cleanup_close int fd = -1; + long pid; + char buf[12]; + uid_t uid; + gid_t gid; + char path[PATH_MAX]; + const char *const suffix = "/libpod/tmp/pause.pid"; + char uid_fmt[16]; + char gid_fmt[16]; + size_t len; + int r; + + cwd = getcwd (NULL, 0); + if (cwd == NULL) + { + fprintf (stderr, "error getting current working directory: %m\n"); + _exit (EXIT_FAILURE); + } + + len = snprintf (path, PATH_MAX, "%s%s", xdg_runtime_dir, suffix); + if (len >= PATH_MAX) + { + errno = ENAMETOOLONG; + fprintf (stderr, "invalid value for XDG_RUNTIME_DIR: %m"); + exit (EXIT_FAILURE); + } + + fd = open (path, O_RDONLY); + if (fd < 0) + return; + + r = TEMP_FAILURE_RETRY (read (fd, buf, sizeof (buf) - 1)); + + if (r < 0) + return; + buf[r] = '\0'; + + pid = strtol (buf, NULL, 10); + if (pid == LONG_MAX) + return; + + uid = geteuid (); + gid = getegid (); + + userns_fd = open_namespace (pid, "user"); + if (userns_fd < 0) + return; + + mntns_fd = open_namespace (pid, "mnt"); + if (mntns_fd < 0) + return; + + if (setns (userns_fd, 0) < 0) + return; + + /* The user namespace was joined, after this point errors are + not recoverable anymore. */ + + if (setns (mntns_fd, 0) < 0) + { + fprintf (stderr, "cannot join mount namespace for %ld: %m", pid); + exit (EXIT_FAILURE); + } + + sprintf (uid_fmt, "%d", uid); + sprintf (gid_fmt, "%d", gid); + + setenv ("_CONTAINERS_USERNS_CONFIGURED", "init", 1); + setenv ("_CONTAINERS_ROOTLESS_UID", uid_fmt, 1); + setenv ("_CONTAINERS_ROOTLESS_GID", gid_fmt, 1); + + if (syscall_setresgid (0, 0, 0) < 0) + { + fprintf (stderr, "cannot setresgid: %m\n"); + _exit (EXIT_FAILURE); + } + + if (syscall_setresuid (0, 0, 0) < 0) + { + fprintf (stderr, "cannot setresuid: %m\n"); + _exit (EXIT_FAILURE); + } + + if (chdir (cwd) < 0) + { + fprintf (stderr, "cannot chdir to %s: %m\n", cwd); + _exit (EXIT_FAILURE); + } + + rootless_uid_init = uid; + rootless_gid_init = gid; + } +} + +static int +syscall_clone (unsigned long flags, void *child_stack) +{ +#if defined(__s390__) || defined(__CRIS__) + return (int) syscall (__NR_clone, child_stack, flags); +#else + return (int) syscall (__NR_clone, flags, child_stack); +#endif +} + +int +reexec_in_user_namespace_wait (int pid, int options) +{ + pid_t p; + int status; + + p = TEMP_FAILURE_RETRY (waitpid (pid, &status, 0)); + if (p < 0) + return -1; + + if (WIFEXITED (status)) + return WEXITSTATUS (status); + if (WIFSIGNALED (status)) + return 128 + WTERMSIG (status); + return -1; +} + +static int +create_pause_process (const char *pause_pid_file_path, char **argv) +{ + pid_t pid; + int p[2]; + + if (pipe (p) < 0) + return -1; + + pid = fork (); + if (pid < 0) + { + close (p[0]); + close (p[1]); + return -1; + } + + if (pid) + { + char b; + int r; + + close (p[1]); + /* Block until we write the pid file. */ + r = TEMP_FAILURE_RETRY (read (p[0], &b, 1)); + close (p[0]); + + reexec_in_user_namespace_wait (pid, 0); + + return r == 1 && b == '0' ? 
0 : -1; + } + else + { + int r, fd; + + close (p[0]); + + setsid (); + pid = fork (); + if (pid < 0) + _exit (EXIT_FAILURE); + + if (pid) + { + char pid_str[12]; + char *tmp_file_path = NULL; + + sprintf (pid_str, "%d", pid); + + if (asprintf (&tmp_file_path, "%s.XXXXXX", pause_pid_file_path) < 0) + { + fprintf (stderr, "unable to print to string\n"); + kill (pid, SIGKILL); + _exit (EXIT_FAILURE); + } + + if (tmp_file_path == NULL) + { + fprintf (stderr, "temporary file path is NULL\n"); + kill (pid, SIGKILL); + _exit (EXIT_FAILURE); + } + + fd = mkstemp (tmp_file_path); + if (fd < 0) + { + fprintf (stderr, "error creating temporary file: %m\n"); + kill (pid, SIGKILL); + _exit (EXIT_FAILURE); + } + + r = TEMP_FAILURE_RETRY (write (fd, pid_str, strlen (pid_str))); + if (r < 0) + { + fprintf (stderr, "cannot write to file descriptor: %m\n"); + kill (pid, SIGKILL); + _exit (EXIT_FAILURE); + } + close (fd); + + /* There can be another process at this point trying to configure the user namespace and the pause + process, do not override the pid file if it already exists. */ + if (rename_noreplace (AT_FDCWD, tmp_file_path, AT_FDCWD, pause_pid_file_path) < 0) + { + unlink (tmp_file_path); + kill (pid, SIGKILL); + _exit (EXIT_FAILURE); + } + + r = TEMP_FAILURE_RETRY (write (p[1], "0", 1)); + if (r < 0) + { + fprintf (stderr, "cannot write to pipe: %m\n"); + _exit (EXIT_FAILURE); + } + close (p[1]); + + _exit (EXIT_SUCCESS); + } + else + { + int null; + + close (p[1]); + + null = open ("/dev/null", O_RDWR); + if (null >= 0) + { + dup2 (null, 0); + dup2 (null, 1); + dup2 (null, 2); + close (null); + } + + for (fd = 3; fd < open_files_max_fd + 16; fd++) + close (fd); + + setenv ("_PODMAN_PAUSE", "1", 1); + execlp (argv[0], argv[0], NULL); + + /* If the execve fails, then do the pause here. 
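+	     execlp() only returns on failure, so reaching do_pause() means the
+	     re-exec with _PODMAN_PAUSE=1 did not happen.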
*/ + do_pause (); + _exit (EXIT_FAILURE); + } + } +} + +static void +join_namespace_or_die (const char *name, int ns_fd) +{ + if (setns (ns_fd, 0) < 0) + { + fprintf (stderr, "cannot set %s namespace\n", name); + _exit (EXIT_FAILURE); + } +} + +int +reexec_userns_join (int pid_to_join, char *pause_pid_file_path) +{ + cleanup_close int userns_fd = -1; + cleanup_close int mntns_fd = -1; + cleanup_free char *cwd = NULL; + char uid[16]; + char gid[16]; + cleanup_free char *argv0 = NULL; + cleanup_free char **argv = NULL; + int pid; + sigset_t sigset, oldsigset; + + cwd = getcwd (NULL, 0); + if (cwd == NULL) + { + fprintf (stderr, "error getting current working directory: %m\n"); + _exit (EXIT_FAILURE); + } + + sprintf (uid, "%d", geteuid ()); + sprintf (gid, "%d", getegid ()); + + argv = get_cmd_line_args (); + if (argv == NULL) + { + fprintf (stderr, "cannot read argv: %m\n"); + _exit (EXIT_FAILURE); + } + + argv0 = argv[0]; + + userns_fd = open_namespace (pid_to_join, "user"); + if (userns_fd < 0) + return userns_fd; + mntns_fd = open_namespace (pid_to_join, "mnt"); + if (mntns_fd < 0) + return mntns_fd; + + pid = fork (); + if (pid < 0) + fprintf (stderr, "cannot fork: %m\n"); + + if (pid) + { + int f; + + for (f = 3; f <= open_files_max_fd; f++) + if (is_fd_inherited (f)) + close (f); + if (do_socket_activation) + { + unsetenv ("LISTEN_PID"); + unsetenv ("LISTEN_FDS"); + unsetenv ("LISTEN_FDNAMES"); + } + + return pid; + } + + if (sigfillset (&sigset) < 0) + { + fprintf (stderr, "cannot fill sigset: %m\n"); + _exit (EXIT_FAILURE); + } + if (sigdelset (&sigset, SIGCHLD) < 0) + { + fprintf (stderr, "cannot sigdelset(SIGCHLD): %m\n"); + _exit (EXIT_FAILURE); + } + if (sigdelset (&sigset, SIGTERM) < 0) + { + fprintf (stderr, "cannot sigdelset(SIGTERM): %m\n"); + _exit (EXIT_FAILURE); + } + if (sigprocmask (SIG_BLOCK, &sigset, &oldsigset) < 0) + { + fprintf (stderr, "cannot block signals: %m\n"); + _exit (EXIT_FAILURE); + } + + if (do_socket_activation) + { + char s[32]; + sprintf (s, "%d", getpid()); + setenv ("LISTEN_PID", s, true); + setenv ("LISTEN_FDS", saved_systemd_listen_fds, true); + // Setting fdnames is optional for systemd_socket_activation + if (saved_systemd_listen_fdnames != NULL) + setenv ("LISTEN_FDNAMES", saved_systemd_listen_fdnames, true); + } + + setenv ("_CONTAINERS_USERNS_CONFIGURED", "init", 1); + setenv ("_CONTAINERS_ROOTLESS_UID", uid, 1); + setenv ("_CONTAINERS_ROOTLESS_GID", gid, 1); + + if (prctl (PR_SET_PDEATHSIG, SIGTERM, 0, 0, 0) < 0) + { + fprintf (stderr, "cannot prctl(PR_SET_PDEATHSIG): %m\n"); + _exit (EXIT_FAILURE); + } + + join_namespace_or_die ("user", userns_fd); + join_namespace_or_die ("mnt", mntns_fd); + + if (syscall_setresgid (0, 0, 0) < 0) + { + fprintf (stderr, "cannot setresgid: %m\n"); + _exit (EXIT_FAILURE); + } + + if (syscall_setresuid (0, 0, 0) < 0) + { + fprintf (stderr, "cannot setresuid: %m\n"); + _exit (EXIT_FAILURE); + } + + if (chdir (cwd) < 0) + { + fprintf (stderr, "cannot chdir to %s: %m\n", cwd); + _exit (EXIT_FAILURE); + } + + if (pause_pid_file_path && pause_pid_file_path[0] != '\0') + { + /* We ignore errors here as we didn't create the namespace anyway. 
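+	     Another podman process may already have written the pause pid file
+	     for this user namespace; losing that race is harmless.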
*/ + create_pause_process (pause_pid_file_path, argv); + } + if (sigprocmask (SIG_SETMASK, &oldsigset, NULL) < 0) + { + fprintf (stderr, "cannot block signals: %m\n"); + _exit (EXIT_FAILURE); + } + + execvp (argv[0], argv); + + _exit (EXIT_FAILURE); +} + +static void +check_proc_sys_userns_file (const char *path) +{ + FILE *fp; + fp = fopen (path, "r"); + if (fp) + { + char buf[32]; + size_t n_read = fread (buf, 1, sizeof(buf) - 1, fp); + if (n_read > 0) + { + buf[n_read] = '\0'; + if (strtol (buf, NULL, 10) == 0) + fprintf (stderr, "user namespaces are not enabled in %s\n", path); + } + fclose (fp); + } +} + +static int +copy_file_to_fd (const char *file_to_read, int outfd) +{ + char buf[512]; + cleanup_close int fd = -1; + + fd = open (file_to_read, O_RDONLY); + if (fd < 0) + return fd; + + for (;;) + { + ssize_t r, w, t = 0; + + r = TEMP_FAILURE_RETRY (read (fd, buf, sizeof buf)); + if (r < 0) + return r; + + if (r == 0) + break; + + while (t < r) + { + w = TEMP_FAILURE_RETRY (write (outfd, &buf[t], r - t)); + if (w < 0) + return w; + t += w; + } + } + return 0; +} + +int +reexec_in_user_namespace (int ready, char *pause_pid_file_path, char *file_to_read, int outputfd) +{ + cleanup_free char **argv = NULL; + cleanup_free char *argv0 = NULL; + cleanup_free char *cwd = NULL; + sigset_t sigset, oldsigset; + int ret; + pid_t pid; + char b; + char uid[16]; + char gid[16]; + + cwd = getcwd (NULL, 0); + if (cwd == NULL) + { + fprintf (stderr, "error getting current working directory: %m\n"); + _exit (EXIT_FAILURE); + } + + sprintf (uid, "%d", geteuid ()); + sprintf (gid, "%d", getegid ()); + + pid = syscall_clone (CLONE_NEWUSER|CLONE_NEWNS|SIGCHLD, NULL); + if (pid < 0) + { + fprintf (stderr, "cannot clone: %m\n"); + check_proc_sys_userns_file (_max_user_namespaces); + check_proc_sys_userns_file (_unprivileged_user_namespaces); + } + if (pid) + { + if (do_socket_activation) + { + long num_fds; + + num_fds = strtol (saved_systemd_listen_fds, NULL, 10); + if (num_fds != LONG_MIN && num_fds != LONG_MAX) + { + int f; + + for (f = 3; f < num_fds + 3; f++) + if (is_fd_inherited (f)) + close (f); + } + unsetenv ("LISTEN_PID"); + unsetenv ("LISTEN_FDS"); + unsetenv ("LISTEN_FDNAMES"); + } + return pid; + } + + if (sigfillset (&sigset) < 0) + { + fprintf (stderr, "cannot fill sigset: %m\n"); + _exit (EXIT_FAILURE); + } + if (sigdelset (&sigset, SIGCHLD) < 0) + { + fprintf (stderr, "cannot sigdelset(SIGCHLD): %m\n"); + _exit (EXIT_FAILURE); + } + if (sigdelset (&sigset, SIGTERM) < 0) + { + fprintf (stderr, "cannot sigdelset(SIGTERM): %m\n"); + _exit (EXIT_FAILURE); + } + if (sigprocmask (SIG_BLOCK, &sigset, &oldsigset) < 0) + { + fprintf (stderr, "cannot block signals: %m\n"); + _exit (EXIT_FAILURE); + } + + argv = get_cmd_line_args (); + if (argv == NULL) + { + fprintf (stderr, "cannot read argv: %m\n"); + _exit (EXIT_FAILURE); + } + + argv0 = argv[0]; + + if (do_socket_activation) + { + char s[32]; + sprintf (s, "%d", getpid()); + setenv ("LISTEN_PID", s, true); + setenv ("LISTEN_FDS", saved_systemd_listen_fds, true); + // Setting fdnames is optional for systemd_socket_activation + if (saved_systemd_listen_fdnames != NULL) + setenv ("LISTEN_FDNAMES", saved_systemd_listen_fdnames, true); + } + + setenv ("_CONTAINERS_USERNS_CONFIGURED", "init", 1); + setenv ("_CONTAINERS_ROOTLESS_UID", uid, 1); + setenv ("_CONTAINERS_ROOTLESS_GID", gid, 1); + + ret = TEMP_FAILURE_RETRY (read (ready, &b, 1)); + if (ret < 0) + { + fprintf (stderr, "cannot read from sync pipe: %m\n"); + _exit (EXIT_FAILURE); + } + if (ret 
!= 1 || b != '0') + _exit (EXIT_FAILURE); + + if (syscall_setresgid (0, 0, 0) < 0) + { + fprintf (stderr, "cannot setresgid: %m\n"); + TEMP_FAILURE_RETRY (write (ready, "1", 1)); + _exit (EXIT_FAILURE); + } + + if (syscall_setresuid (0, 0, 0) < 0) + { + fprintf (stderr, "cannot setresuid: %m\n"); + TEMP_FAILURE_RETRY (write (ready, "1", 1)); + _exit (EXIT_FAILURE); + } + + if (chdir (cwd) < 0) + { + fprintf (stderr, "cannot chdir to %s: %m\n", cwd); + TEMP_FAILURE_RETRY (write (ready, "1", 1)); + _exit (EXIT_FAILURE); + } + + if (pause_pid_file_path && pause_pid_file_path[0] != '\0') + { + if (create_pause_process (pause_pid_file_path, argv) < 0) + { + TEMP_FAILURE_RETRY (write (ready, "2", 1)); + _exit (EXIT_FAILURE); + } + } + + ret = TEMP_FAILURE_RETRY (write (ready, "0", 1)); + if (ret < 0) + { + fprintf (stderr, "cannot write to ready pipe: %m\n"); + _exit (EXIT_FAILURE); + } + close (ready); + + if (sigprocmask (SIG_SETMASK, &oldsigset, NULL) < 0) + { + fprintf (stderr, "cannot block signals: %m\n"); + _exit (EXIT_FAILURE); + } + + if (file_to_read && file_to_read[0]) + { + ret = copy_file_to_fd (file_to_read, outputfd); + close (outputfd); + _exit (ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE); + } + + execvp (argv[0], argv); + + _exit (EXIT_FAILURE); +} diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.go b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.go new file mode 100644 index 00000000000..5af9a978b0e --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.go @@ -0,0 +1,623 @@ +//go:build linux && cgo +// +build linux,cgo + +package rootless + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + gosignal "os/signal" + "os/user" + "runtime" + "strconv" + "strings" + "sync" + "unsafe" + + "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/storage/pkg/idtools" + pmount "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/unshare" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/syndtr/gocapability/capability" + "golang.org/x/sys/unix" +) + +/* +#cgo remote CFLAGS: -Wall -Werror -DDISABLE_JOIN_SHORTCUT +#include +#include +extern uid_t rootless_uid(); +extern uid_t rootless_gid(); +extern int reexec_in_user_namespace(int ready, char *pause_pid_file_path, char *file_to_read, int fd); +extern int reexec_in_user_namespace_wait(int pid, int options); +extern int reexec_userns_join(int pid, char *pause_pid_file_path); +extern int is_fd_inherited(int fd); +*/ +import "C" + +const ( + numSig = 65 // max number of signals +) + +func runInUser() error { + return os.Setenv("_CONTAINERS_USERNS_CONFIGURED", "done") +} + +var ( + isRootlessOnce sync.Once + isRootless bool +) + +// IsRootless tells us if we are running in rootless mode +func IsRootless() bool { + isRootlessOnce.Do(func() { + rootlessUIDInit := int(C.rootless_uid()) + rootlessGIDInit := int(C.rootless_gid()) + if rootlessUIDInit != 0 { + // This happens if we joined the user+mount namespace as part of + if err := os.Setenv("_CONTAINERS_USERNS_CONFIGURED", "done"); err != nil { + logrus.Errorf("Failed to set environment variable %s as %s", "_CONTAINERS_USERNS_CONFIGURED", "done") + } + if err := os.Setenv("_CONTAINERS_ROOTLESS_UID", fmt.Sprintf("%d", rootlessUIDInit)); err != nil { + logrus.Errorf("Failed to set environment variable %s as %d", "_CONTAINERS_ROOTLESS_UID", rootlessUIDInit) + } + if err := os.Setenv("_CONTAINERS_ROOTLESS_GID", 
fmt.Sprintf("%d", rootlessGIDInit)); err != nil { + logrus.Errorf("Failed to set environment variable %s as %d", "_CONTAINERS_ROOTLESS_GID", rootlessGIDInit) + } + } + isRootless = os.Geteuid() != 0 || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != "" + if !isRootless { + hasCapSysAdmin, err := unshare.HasCapSysAdmin() + if err != nil { + logrus.Warnf("Failed to read CAP_SYS_ADMIN presence for the current process") + } + if err == nil && !hasCapSysAdmin { + isRootless = true + } + } + }) + return isRootless +} + +// GetRootlessUID returns the UID of the user in the parent userNS +func GetRootlessUID() int { + uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Geteuid() +} + +// GetRootlessGID returns the GID of the user in the parent userNS +func GetRootlessGID() int { + gidEnv := os.Getenv("_CONTAINERS_ROOTLESS_GID") + if gidEnv != "" { + u, _ := strconv.Atoi(gidEnv) + return u + } + + /* If the _CONTAINERS_ROOTLESS_UID is set, assume the gid==uid. */ + uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Getegid() +} + +func tryMappingTool(uid bool, pid int, hostID int, mappings []idtools.IDMap) error { + var tool = "newuidmap" + mode := os.ModeSetuid + cap := capability.CAP_SETUID + idtype := "setuid" + if !uid { + tool = "newgidmap" + mode = os.ModeSetgid + cap = capability.CAP_SETGID + idtype = "setgid" + } + path, err := exec.LookPath(tool) + if err != nil { + return errors.Wrapf(err, "command required for rootless mode with multiple IDs") + } + + appendTriplet := func(l []string, a, b, c int) []string { + return append(l, strconv.Itoa(a), strconv.Itoa(b), strconv.Itoa(c)) + } + + args := []string{path, fmt.Sprintf("%d", pid)} + args = appendTriplet(args, 0, hostID, 1) + for _, i := range mappings { + if hostID >= i.HostID && hostID < i.HostID+i.Size { + what := "UID" + where := "/etc/subuid" + if !uid { + what = "GID" + where = "/etc/subgid" + } + return errors.Errorf("invalid configuration: the specified mapping %d:%d in %q includes the user %s", i.HostID, i.Size, where, what) + } + args = appendTriplet(args, i.ContainerID+1, i.HostID, i.Size) + } + cmd := exec.Cmd{ + Path: path, + Args: args, + } + + if output, err := cmd.CombinedOutput(); err != nil { + logrus.Errorf("running `%s`: %s", strings.Join(args, " "), output) + errorStr := fmt.Sprintf("cannot setup namespace using %q", path) + if isSet, err := unshare.IsSetID(cmd.Path, mode, cap); err != nil { + logrus.Errorf("Failed to check for %s on %s: %v", idtype, path, err) + } else if !isSet { + errorStr = fmt.Sprintf("%s: should have %s or have filecaps %s", errorStr, idtype, idtype) + } + return errors.Wrapf(err, errorStr) + } + return nil +} + +// joinUserAndMountNS re-exec podman in a new userNS and join the user and mount +// namespace of the specified PID without looking up its parent. Useful to join directly +// the conmon process. 
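+//
+// A minimal caller sketch (hypothetical names, not part of this package):
+//
+//	joined, exitCode, err := joinUserAndMountNS(uint(conmonPid), pausePidPath)
+//	if err != nil {
+//		return err
+//	}
+//	if joined {
+//		os.Exit(exitCode) // the re-exec'd child already did the work
+//	}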
+func joinUserAndMountNS(pid uint, pausePid string) (bool, int, error) { + hasCapSysAdmin, err := unshare.HasCapSysAdmin() + if err != nil { + return false, 0, err + } + if hasCapSysAdmin || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != "" { + return false, 0, nil + } + + cPausePid := C.CString(pausePid) + defer C.free(unsafe.Pointer(cPausePid)) + + pidC := C.reexec_userns_join(C.int(pid), cPausePid) + if int(pidC) < 0 { + return false, -1, errors.Errorf("cannot re-exec process") + } + + ret := C.reexec_in_user_namespace_wait(pidC, 0) + if ret < 0 { + return false, -1, errors.New("waiting for the re-exec process") + } + + return true, int(ret), nil +} + +// GetConfiguredMappings returns the additional IDs configured for the current user. +func GetConfiguredMappings() ([]idtools.IDMap, []idtools.IDMap, error) { + var uids, gids []idtools.IDMap + username := os.Getenv("USER") + if username == "" { + var id string + if os.Geteuid() == 0 { + id = strconv.Itoa(GetRootlessUID()) + } else { + id = strconv.Itoa(os.Geteuid()) + } + userID, err := user.LookupId(id) + if err == nil { + username = userID.Username + } + } + mappings, err := idtools.NewIDMappings(username, username) + if err != nil { + logLevel := logrus.ErrorLevel + if os.Geteuid() == 0 && GetRootlessUID() == 0 { + logLevel = logrus.DebugLevel + } + logrus.StandardLogger().Logf(logLevel, "cannot find UID/GID for user %s: %v - check rootless mode in man pages.", username, err) + } else { + uids = mappings.UIDs() + gids = mappings.GIDs() + } + return uids, gids, nil +} + +func copyMappings(from, to string) error { + content, err := ioutil.ReadFile(from) + if err != nil { + return err + } + // Both runc and crun check whether the current process is in a user namespace + // by looking up 4294967295 in /proc/self/uid_map. If the mappings would be + // copied as they are, the check in the OCI runtimes would fail. So just split + // it in two different ranges. 
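+	// For example, a parent map line of "0 0 4294967295" becomes
+	// "0 0 1" plus "1 1 4294967294": the same identity mapping, split so that
+	// the runtimes' /proc/self/uid_map check no longer sees 4294967295.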
+ if bytes.Contains(content, []byte("4294967295")) { + content = []byte("0 0 1\n1 1 4294967294\n") + } + return ioutil.WriteFile(to, content, 0600) +} + +func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ bool, _ int, retErr error) { + hasCapSysAdmin, err := unshare.HasCapSysAdmin() + if err != nil { + return false, 0, err + } + + if hasCapSysAdmin || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != "" { + if os.Getenv("_CONTAINERS_USERNS_CONFIGURED") == "init" { + return false, 0, runInUser() + } + return false, 0, nil + } + + if mounts, err := pmount.GetMounts(); err == nil { + for _, m := range mounts { + if m.Mountpoint == "/" { + isShared := false + for _, o := range strings.Split(m.Optional, ",") { + if strings.HasPrefix(o, "shared:") { + isShared = true + break + } + } + if !isShared { + logrus.Warningf("%q is not a shared mount, this could cause issues or missing mounts with rootless containers", m.Mountpoint) + } + break + } + } + } + + cPausePid := C.CString(pausePid) + defer C.free(unsafe.Pointer(cPausePid)) + + cFileToRead := C.CString(fileToRead) + defer C.free(unsafe.Pointer(cFileToRead)) + var fileOutputFD C.int + if fileOutput != nil { + fileOutputFD = C.int(fileOutput.Fd()) + } + + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_DGRAM, 0) + if err != nil { + return false, -1, err + } + r, w := os.NewFile(uintptr(fds[0]), "sync host"), os.NewFile(uintptr(fds[1]), "sync child") + + var pid int + + defer errorhandling.CloseQuiet(r) + defer errorhandling.CloseQuiet(w) + defer func() { + toWrite := []byte("0") + if retErr != nil { + toWrite = []byte("1") + } + if _, err := w.Write(toWrite); err != nil { + logrus.Errorf("Failed to write byte 0: %q", err) + } + if retErr != nil && pid > 0 { + if err := unix.Kill(pid, unix.SIGKILL); err != nil { + if err != unix.ESRCH { + logrus.Errorf("Failed to cleanup process %d: %v", pid, err) + } + } + C.reexec_in_user_namespace_wait(C.int(pid), 0) + } + }() + + pidC := C.reexec_in_user_namespace(C.int(r.Fd()), cPausePid, cFileToRead, fileOutputFD) + pid = int(pidC) + if pid < 0 { + return false, -1, errors.Errorf("cannot re-exec process") + } + + uids, gids, err := GetConfiguredMappings() + if err != nil { + return false, -1, err + } + + uidMap := fmt.Sprintf("/proc/%d/uid_map", pid) + gidMap := fmt.Sprintf("/proc/%d/gid_map", pid) + + uidsMapped := false + + if err := copyMappings("/proc/self/uid_map", uidMap); err == nil { + uidsMapped = true + } + + if uids != nil && !uidsMapped { + err := tryMappingTool(true, pid, os.Geteuid(), uids) + // If some mappings were specified, do not ignore the error + if err != nil && len(uids) > 0 { + return false, -1, err + } + uidsMapped = err == nil + } + if !uidsMapped { + logrus.Warnf("Using rootless single mapping into the namespace. This might break some images. 
Check /etc/subuid and /etc/subgid for adding sub*ids if not using a network user") + setgroups := fmt.Sprintf("/proc/%d/setgroups", pid) + err = ioutil.WriteFile(setgroups, []byte("deny\n"), 0666) + if err != nil { + return false, -1, errors.Wrapf(err, "cannot write setgroups file") + } + logrus.Debugf("write setgroups file exited with 0") + + err = ioutil.WriteFile(uidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Geteuid())), 0666) + if err != nil { + return false, -1, errors.Wrapf(err, "cannot write uid_map") + } + logrus.Debugf("write uid_map exited with 0") + } + + gidsMapped := false + if err := copyMappings("/proc/self/gid_map", gidMap); err == nil { + gidsMapped = true + } + if gids != nil && !gidsMapped { + err := tryMappingTool(false, pid, os.Getegid(), gids) + // If some mappings were specified, do not ignore the error + if err != nil && len(gids) > 0 { + return false, -1, err + } + gidsMapped = err == nil + } + if !gidsMapped { + err = ioutil.WriteFile(gidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Getegid())), 0666) + if err != nil { + return false, -1, errors.Wrapf(err, "cannot write gid_map") + } + } + + _, err = w.Write([]byte("0")) + if err != nil { + return false, -1, errors.Wrapf(err, "write to sync pipe") + } + + b := make([]byte, 1) + _, err = w.Read(b) + if err != nil { + return false, -1, errors.Wrapf(err, "read from sync pipe") + } + + if fileOutput != nil { + ret := C.reexec_in_user_namespace_wait(pidC, 0) + if ret < 0 { + return false, -1, errors.New("waiting for the re-exec process") + } + + return true, 0, nil + } + + if b[0] == '2' { + // We have lost the race for writing the PID file, as probably another + // process created a namespace and wrote the PID. + // Try to join it. + data, err := ioutil.ReadFile(pausePid) + if err == nil { + pid, err := strconv.ParseUint(string(data), 10, 0) + if err == nil { + return joinUserAndMountNS(uint(pid), "") + } + } + return false, -1, errors.New("setting up the process") + } + + if b[0] != '0' { + return false, -1, errors.New("setting up the process") + } + + signals := []os.Signal{} + for sig := 0; sig < numSig; sig++ { + if sig == int(unix.SIGTSTP) { + continue + } + signals = append(signals, unix.Signal(sig)) + } + + c := make(chan os.Signal, len(signals)) + gosignal.Notify(c, signals...) + defer gosignal.Reset() + go func() { + for s := range c { + if s == unix.SIGCHLD || s == unix.SIGPIPE { + continue + } + + if err := unix.Kill(int(pidC), s.(unix.Signal)); err != nil { + if err != unix.ESRCH { + logrus.Errorf("Failed to propagate signal to child process %d: %v", int(pidC), err) + } + } + } + }() + + ret := C.reexec_in_user_namespace_wait(pidC, 0) + if ret < 0 { + return false, -1, errors.New("waiting for the re-exec process") + } + + return true, int(ret), nil +} + +// BecomeRootInUserNS re-exec podman in a new userNS. It returns whether podman was re-executed +// into a new user namespace and the return code from the re-executed podman process. +// If podman was re-executed the caller needs to propagate the error code returned by the child +// process. +func BecomeRootInUserNS(pausePid string) (bool, int, error) { + return becomeRootInUserNS(pausePid, "", nil) +} + +// TryJoinFromFilePaths attempts to join the namespaces of the pid files in paths. +// This is useful when there are already running containers and we +// don't have a pause process yet. We can use the paths to the conmon +// processes to attempt joining their namespaces. 
+// If needNewNamespace is set, the file is read from a temporary user +// namespace, this is useful for containers that are running with a +// different uidmap and the unprivileged user has no way to read the +// file owned by the root in the container. +func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []string) (bool, int, error) { + if len(paths) == 0 { + return BecomeRootInUserNS(pausePidPath) + } + + var lastErr error + var pausePid int + foundProcess := false + + for _, path := range paths { + if !needNewNamespace { + data, err := ioutil.ReadFile(path) + if err != nil { + lastErr = err + continue + } + + pausePid, err = strconv.Atoi(string(data)) + if err != nil { + lastErr = errors.Wrapf(err, "cannot parse file %s", path) + continue + } + + lastErr = nil + break + } else { + r, w, err := os.Pipe() + if err != nil { + lastErr = err + continue + } + + defer errorhandling.CloseQuiet(r) + + if _, _, err := becomeRootInUserNS("", path, w); err != nil { + w.Close() + lastErr = err + continue + } + + if err := w.Close(); err != nil { + return false, 0, err + } + defer func() { + C.reexec_in_user_namespace_wait(-1, 0) + }() + + b := make([]byte, 32) + + n, err := r.Read(b) + if err != nil { + lastErr = errors.Wrapf(err, "cannot read %s\n", path) + continue + } + + pausePid, err = strconv.Atoi(string(b[:n])) + if err == nil && unix.Kill(pausePid, 0) == nil { + foundProcess = true + lastErr = nil + break + } + } + } + if !foundProcess && pausePidPath != "" { + return BecomeRootInUserNS(pausePidPath) + } + if lastErr != nil { + return false, 0, lastErr + } + + return joinUserAndMountNS(uint(pausePid), pausePidPath) +} + +// ReadMappingsProc parses and returns the ID mappings at the specified path. +func ReadMappingsProc(path string) ([]idtools.IDMap, error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + mappings := []idtools.IDMap{} + + buf := bufio.NewReader(file) + for { + line, _, err := buf.ReadLine() + if err != nil { + if err == io.EOF { + return mappings, nil + } + return nil, errors.Wrapf(err, "cannot read line from %s", path) + } + if line == nil { + return mappings, nil + } + + containerID, hostID, size := 0, 0, 0 + if _, err := fmt.Sscanf(string(line), "%d %d %d", &containerID, &hostID, &size); err != nil { + return nil, errors.Wrapf(err, "cannot parse %s", string(line)) + } + mappings = append(mappings, idtools.IDMap{ContainerID: containerID, HostID: hostID, Size: size}) + } +} + +func matches(id int, configuredIDs []idtools.IDMap, currentIDs []idtools.IDMap) bool { + // The first mapping is the host user, handle it separately. + if currentIDs[0].HostID != id || currentIDs[0].Size != 1 { + return false + } + + currentIDs = currentIDs[1:] + if len(currentIDs) != len(configuredIDs) { + return false + } + + // It is fine to iterate sequentially as both slices are sorted. + for i := range currentIDs { + if currentIDs[i].HostID != configuredIDs[i].HostID { + return false + } + if currentIDs[i].Size != configuredIDs[i].Size { + return false + } + } + + return true +} + +// ConfigurationMatches checks whether the additional uids/gids configured for the user +// match the current user namespace. 
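+//
+// Possible usage (hypothetical caller):
+//
+//	ok, err := rootless.ConfigurationMatches()
+//	if err == nil && !ok {
+//		logrus.Warn("ID mappings changed since the user namespace was created; `podman system migrate` recreates it")
+//	}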
+func ConfigurationMatches() (bool, error) { + if !IsRootless() || os.Geteuid() != 0 { + return true, nil + } + + uids, gids, err := GetConfiguredMappings() + if err != nil { + return false, err + } + + currentUIDs, err := ReadMappingsProc("/proc/self/uid_map") + if err != nil { + return false, err + } + + if !matches(GetRootlessUID(), uids, currentUIDs) { + return false, err + } + + currentGIDs, err := ReadMappingsProc("/proc/self/gid_map") + if err != nil { + return false, err + } + + return matches(GetRootlessGID(), gids, currentGIDs), nil +} + +// IsFdInherited checks whether the fd is opened and valid to use +func IsFdInherited(fd int) bool { + return int(C.is_fd_inherited(C.int(fd))) > 0 +} diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_unsupported.go new file mode 100644 index 00000000000..fe164e2350c --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_unsupported.go @@ -0,0 +1,72 @@ +//go:build !linux || !cgo +// +build !linux !cgo + +package rootless + +import ( + "os" + + "github.com/containers/storage/pkg/idtools" + "github.com/pkg/errors" +) + +// IsRootless returns whether the user is rootless +func IsRootless() bool { + uid := os.Geteuid() + // os.Geteuid() on Windows returns -1 + if uid == -1 { + return false + } + return uid != 0 +} + +// BecomeRootInUserNS re-exec podman in a new userNS. It returns whether podman was re-executed +// into a new user namespace and the return code from the re-executed podman process. +// If podman was re-executed the caller needs to propagate the error code returned by the child +// process. It is a convenience function for BecomeRootInUserNSWithOpts with a default configuration. +func BecomeRootInUserNS(pausePid string) (bool, int, error) { + return false, -1, errors.New("this function is not supported on this os") +} + +// GetRootlessUID returns the UID of the user in the parent userNS +func GetRootlessUID() int { + return -1 +} + +// GetRootlessGID returns the GID of the user in the parent userNS +func GetRootlessGID() int { + return -1 +} + +// TryJoinFromFilePaths attempts to join the namespaces of the pid files in paths. +// This is useful when there are already running containers and we +// don't have a pause process yet. We can use the paths to the conmon +// processes to attempt joining their namespaces. +// If needNewNamespace is set, the file is read from a temporary user +// namespace, this is useful for containers that are running with a +// different uidmap and the unprivileged user has no way to read the +// file owned by the root in the container. +func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []string) (bool, int, error) { + return false, -1, errors.New("this function is not supported on this os") +} + +// ConfigurationMatches checks whether the additional uids/gids configured for the user +// match the current user namespace. +func ConfigurationMatches() (bool, error) { + return true, nil +} + +// GetConfiguredMappings returns the additional IDs configured for the current user. 
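+//
+// (Every function in this file is a stub: the build tags above select it only
+// on !linux or !cgo builds, where it keeps the exported rootless API
+// compilable while reporting the feature as unsupported.)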
+func GetConfiguredMappings() ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, errors.New("this function is not supported on this os") +} + +// ReadMappingsProc returns the uid_map and gid_map +func ReadMappingsProc(path string) ([]idtools.IDMap, error) { + return nil, nil +} + +// IsFdInherited checks whether the fd is opened and valid to use +func IsFdInherited(fd int) bool { + return false +} diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_common.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_common.go new file mode 100644 index 00000000000..5ea67843a5d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_common.go @@ -0,0 +1,41 @@ +package signal + +import ( + "fmt" + "strconv" + "strings" + "syscall" +) + +// ParseSignal translates a string to a valid syscall signal. +// It returns an error if the signal map doesn't include the given signal. +func ParseSignal(rawSignal string) (syscall.Signal, error) { + s, err := strconv.Atoi(rawSignal) + if err == nil { + if s == 0 { + return -1, fmt.Errorf("invalid signal: %s", rawSignal) + } + return syscall.Signal(s), nil + } + sig, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] + if !ok { + return -1, fmt.Errorf("invalid signal: %s", rawSignal) + } + return sig, nil +} + +// ParseSignalNameOrNumber translates a string to a valid syscall signal. Input +// can be a name or number representation i.e. "KILL" "9". +func ParseSignalNameOrNumber(rawSignal string) (syscall.Signal, error) { + basename := strings.TrimPrefix(rawSignal, "-") + s, err := ParseSignal(basename) + if err == nil { + return s, nil + } + for k, v := range signalMap { + if k == strings.ToUpper(basename) { + return v, nil + } + } + return -1, fmt.Errorf("invalid signal: %s", basename) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux.go new file mode 100644 index 00000000000..21e09c9fef0 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux.go @@ -0,0 +1,108 @@ +//go:build linux && !mips && !mipsle && !mips64 && !mips64le +// +build linux,!mips,!mipsle,!mips64,!mips64le + +// Signal handling for Linux only. +package signal + +// Copyright 2013-2018 Docker, Inc. + +// NOTE: this package has originally been copied from github.com/docker/docker. + +import ( + "os" + "os/signal" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + sigrtmin = 34 + sigrtmax = 64 + + SIGWINCH = syscall.SIGWINCH // For cross-compilation with Windows +) + +// signalMap is a map of Linux signals. 
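+//
+// Illustrative lookups through ParseSignal in signal_common.go:
+//
+//	signal.ParseSignal("SIGTERM") // unix.SIGTERM (the SIG prefix is stripped)
+//	signal.ParseSignal("9")       // syscall.Signal(9), i.e. SIGKILL
+//	signal.ParseSignal("RTMIN+3") // sigrtmin + 3 = real-time signal 37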
+var signalMap = map[string]syscall.Signal{ + "ABRT": unix.SIGABRT, + "ALRM": unix.SIGALRM, + "BUS": unix.SIGBUS, + "CHLD": unix.SIGCHLD, + "CLD": unix.SIGCLD, + "CONT": unix.SIGCONT, + "FPE": unix.SIGFPE, + "HUP": unix.SIGHUP, + "ILL": unix.SIGILL, + "INT": unix.SIGINT, + "IO": unix.SIGIO, + "IOT": unix.SIGIOT, + "KILL": unix.SIGKILL, + "PIPE": unix.SIGPIPE, + "POLL": unix.SIGPOLL, + "PROF": unix.SIGPROF, + "PWR": unix.SIGPWR, + "QUIT": unix.SIGQUIT, + "SEGV": unix.SIGSEGV, + "STKFLT": unix.SIGSTKFLT, + "STOP": unix.SIGSTOP, + "SYS": unix.SIGSYS, + "TERM": unix.SIGTERM, + "TRAP": unix.SIGTRAP, + "TSTP": unix.SIGTSTP, + "TTIN": unix.SIGTTIN, + "TTOU": unix.SIGTTOU, + "URG": unix.SIGURG, + "USR1": unix.SIGUSR1, + "USR2": unix.SIGUSR2, + "VTALRM": unix.SIGVTALRM, + "WINCH": unix.SIGWINCH, + "XCPU": unix.SIGXCPU, + "XFSZ": unix.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + handledSigs := make([]os.Signal, 0, len(signalMap)) + for _, s := range signalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux_mipsx.go new file mode 100644 index 00000000000..52b07aaf463 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux_mipsx.go @@ -0,0 +1,109 @@ +//go:build linux && (mips || mipsle || mips64 || mips64le) +// +build linux +// +build mips mipsle mips64 mips64le + +// Special signal handling for mips architecture +package signal + +// Copyright 2013-2018 Docker, Inc. + +// NOTE: this package has originally been copied from github.com/docker/docker. + +import ( + "os" + "os/signal" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + sigrtmin = 34 + sigrtmax = 127 + + SIGWINCH = syscall.SIGWINCH +) + +// signalMap is a map of Linux signals. 
+var signalMap = map[string]syscall.Signal{ + "ABRT": unix.SIGABRT, + "ALRM": unix.SIGALRM, + "BUS": unix.SIGBUS, + "CHLD": unix.SIGCHLD, + "CLD": unix.SIGCLD, + "CONT": unix.SIGCONT, + "FPE": unix.SIGFPE, + "HUP": unix.SIGHUP, + "ILL": unix.SIGILL, + "INT": unix.SIGINT, + "IO": unix.SIGIO, + "IOT": unix.SIGIOT, + "KILL": unix.SIGKILL, + "PIPE": unix.SIGPIPE, + "POLL": unix.SIGPOLL, + "PROF": unix.SIGPROF, + "PWR": unix.SIGPWR, + "QUIT": unix.SIGQUIT, + "SEGV": unix.SIGSEGV, + "EMT": unix.SIGEMT, + "STOP": unix.SIGSTOP, + "SYS": unix.SIGSYS, + "TERM": unix.SIGTERM, + "TRAP": unix.SIGTRAP, + "TSTP": unix.SIGTSTP, + "TTIN": unix.SIGTTIN, + "TTOU": unix.SIGTTOU, + "URG": unix.SIGURG, + "USR1": unix.SIGUSR1, + "USR2": unix.SIGUSR2, + "VTALRM": unix.SIGVTALRM, + "WINCH": unix.SIGWINCH, + "XCPU": unix.SIGXCPU, + "XFSZ": unix.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + handledSigs := make([]os.Signal, 0, len(signalMap)) + for _, s := range signalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_unix.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_unix.go new file mode 100644 index 00000000000..c0aa62d2107 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_unix.go @@ -0,0 +1,100 @@ +//go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd netbsd openbsd solaris zos + +// Signal handling for Linux only. +package signal + +import ( + "os" + "syscall" +) + +const ( + sigrtmin = 34 + sigrtmax = 64 + + SIGWINCH = syscall.SIGWINCH +) + +// signalMap is a map of Linux signals. +// These constants are sourced from the Linux version of golang.org/x/sys/unix +// (I don't see much risk of this changing). +// This should work as long as Podman only runs containers on Linux, which seems +// a safe assumption for now. 
+var signalMap = map[string]syscall.Signal{ + "ABRT": syscall.Signal(0x6), + "ALRM": syscall.Signal(0xe), + "BUS": syscall.Signal(0x7), + "CHLD": syscall.Signal(0x11), + "CLD": syscall.Signal(0x11), + "CONT": syscall.Signal(0x12), + "FPE": syscall.Signal(0x8), + "HUP": syscall.Signal(0x1), + "ILL": syscall.Signal(0x4), + "INT": syscall.Signal(0x2), + "IO": syscall.Signal(0x1d), + "IOT": syscall.Signal(0x6), + "KILL": syscall.Signal(0x9), + "PIPE": syscall.Signal(0xd), + "POLL": syscall.Signal(0x1d), + "PROF": syscall.Signal(0x1b), + "PWR": syscall.Signal(0x1e), + "QUIT": syscall.Signal(0x3), + "SEGV": syscall.Signal(0xb), + "STKFLT": syscall.Signal(0x10), + "STOP": syscall.Signal(0x13), + "SYS": syscall.Signal(0x1f), + "TERM": syscall.Signal(0xf), + "TRAP": syscall.Signal(0x5), + "TSTP": syscall.Signal(0x14), + "TTIN": syscall.Signal(0x15), + "TTOU": syscall.Signal(0x16), + "URG": syscall.Signal(0x17), + "USR1": syscall.Signal(0xa), + "USR2": syscall.Signal(0xc), + "VTALRM": syscall.Signal(0x1a), + "WINCH": syscall.Signal(0x1c), + "XCPU": syscall.Signal(0x18), + "XFSZ": syscall.Signal(0x19), + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + panic("Unsupported on non-linux platforms") +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + panic("Unsupported on non-linux platforms") +} diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_unsupported.go new file mode 100644 index 00000000000..d8bba7c905c --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_unsupported.go @@ -0,0 +1,100 @@ +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos + +// Signal handling for Linux only. +package signal + +import ( + "os" + "syscall" +) + +const ( + sigrtmin = 34 + sigrtmax = 64 + + SIGWINCH = syscall.Signal(0xff) +) + +// signalMap is a map of Linux signals. +// These constants are sourced from the Linux version of golang.org/x/sys/unix +// (I don't see much risk of this changing). +// This should work as long as Podman only runs containers on Linux, which seems +// a safe assumption for now. 
+var signalMap = map[string]syscall.Signal{ + "ABRT": syscall.Signal(0x6), + "ALRM": syscall.Signal(0xe), + "BUS": syscall.Signal(0x7), + "CHLD": syscall.Signal(0x11), + "CLD": syscall.Signal(0x11), + "CONT": syscall.Signal(0x12), + "FPE": syscall.Signal(0x8), + "HUP": syscall.Signal(0x1), + "ILL": syscall.Signal(0x4), + "INT": syscall.Signal(0x2), + "IO": syscall.Signal(0x1d), + "IOT": syscall.Signal(0x6), + "KILL": syscall.Signal(0x9), + "PIPE": syscall.Signal(0xd), + "POLL": syscall.Signal(0x1d), + "PROF": syscall.Signal(0x1b), + "PWR": syscall.Signal(0x1e), + "QUIT": syscall.Signal(0x3), + "SEGV": syscall.Signal(0xb), + "STKFLT": syscall.Signal(0x10), + "STOP": syscall.Signal(0x13), + "SYS": syscall.Signal(0x1f), + "TERM": syscall.Signal(0xf), + "TRAP": syscall.Signal(0x5), + "TSTP": syscall.Signal(0x14), + "TTIN": syscall.Signal(0x15), + "TTOU": syscall.Signal(0x16), + "URG": syscall.Signal(0x17), + "USR1": syscall.Signal(0xa), + "USR2": syscall.Signal(0xc), + "VTALRM": syscall.Signal(0x1a), + "WINCH": syscall.Signal(0x1c), + "XCPU": syscall.Signal(0x18), + "XFSZ": syscall.Signal(0x19), + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + panic("Unsupported on non-linux platforms") +} + +// StopCatch stops catching the signals and closes the specified channel. 
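+//
+// On Linux the pair is used like this (sketch; here both functions panic,
+// as signal forwarding is only implemented for Linux builds):
+//
+//	sigc := make(chan os.Signal, 128)
+//	signal.CatchAll(sigc)
+//	defer signal.StopCatch(sigc)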
+func StopCatch(sigc chan os.Signal) { + panic("Unsupported on non-linux platforms") +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/config_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/specgen/config_unsupported.go new file mode 100644 index 00000000000..a6bf77277ce --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/config_unsupported.go @@ -0,0 +1,14 @@ +//go:build !linux +// +build !linux + +package specgen + +import ( + "github.com/containers/common/libimage" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func (s *SpecGenerator) getSeccompConfig(configSpec *spec.Spec, img *libimage.Image) (*spec.LinuxSeccomp, error) { + return nil, errors.New("function not supported on non-linux OS's") +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go b/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go new file mode 100644 index 00000000000..5616a4511de --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go @@ -0,0 +1,203 @@ +package specgen + +import ( + "strconv" + "strings" + + "github.com/containers/common/pkg/util" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +var ( + // ErrInvalidSpecConfig describes an error that the given SpecGenerator is invalid + ErrInvalidSpecConfig = errors.New("invalid configuration") + // SystemDValues describes the only values that SystemD can be + SystemDValues = []string{"true", "false", "always"} + // SdNotifyModeValues describes the only values that SdNotifyMode can be + SdNotifyModeValues = []string{define.SdNotifyModeContainer, define.SdNotifyModeConmon, define.SdNotifyModeIgnore} + // ImageVolumeModeValues describes the only values that ImageVolumeMode can be + ImageVolumeModeValues = []string{"ignore", "tmpfs", "anonymous"} +) + +func exclusiveOptions(opt1, opt2 string) error { + return errors.Errorf("%s and %s are mutually exclusive options", opt1, opt2) +} + +// Validate verifies that the given SpecGenerator is valid and satisfies required +// input for creating a container. +func (s *SpecGenerator) Validate() error { + // Containers being added to a pod cannot have certain network attributes + // associated with them because those should be on the infra container. 
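+	// (For example, a hypothetical `podman run --pod mypod -p 8080:80 ...` is
+	// rejected here: the port mapping has to be supplied when the pod and its
+	// infra container are created.)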
+	if len(s.Pod) > 0 && s.NetNS.NSMode == FromPod {
+		if len(s.Networks) > 0 {
+			return errors.Wrap(define.ErrNetworkOnPodContainer, "networks must be defined when the pod is created")
+		}
+		if len(s.PortMappings) > 0 || s.PublishExposedPorts {
+			return errors.Wrap(define.ErrNetworkOnPodContainer, "published or exposed ports must be defined when the pod is created")
+		}
+		if len(s.HostAdd) > 0 {
+			return errors.Wrap(define.ErrNetworkOnPodContainer, "extra host entries must be specified on the pod")
+		}
+	}
+
+	if s.NetNS.IsContainer() && len(s.HostAdd) > 0 {
+		return errors.Wrap(ErrInvalidSpecConfig, "cannot set extra host entries when the container is joined to another container's network namespace")
+	}
+
+	//
+	// ContainerBasicConfig
+	//
+	// Rootfs and Image cannot both be populated
+	if len(s.ContainerStorageConfig.Image) > 0 && len(s.ContainerStorageConfig.Rootfs) > 0 {
+		return errors.Wrap(ErrInvalidSpecConfig, "both image and rootfs cannot be set simultaneously")
+	}
+	// Cannot set hostname and utsns
+	if len(s.ContainerBasicConfig.Hostname) > 0 && !s.ContainerBasicConfig.UtsNS.IsPrivate() {
+		if s.ContainerBasicConfig.UtsNS.IsPod() {
+			return errors.Wrap(ErrInvalidSpecConfig, "cannot set hostname when joining the pod UTS namespace")
+		}
+		return errors.Wrap(ErrInvalidSpecConfig, "cannot set hostname when running in the host UTS namespace")
+	}
+	// systemd values must be true, false, or always
+	if len(s.ContainerBasicConfig.Systemd) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerBasicConfig.Systemd), SystemDValues) {
+		return errors.Wrapf(ErrInvalidSpecConfig, "--systemd values must be one of %q", strings.Join(SystemDValues, ", "))
+	}
+	// sdnotify values must be container, conmon, or ignore
+	if len(s.ContainerBasicConfig.SdNotifyMode) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerBasicConfig.SdNotifyMode), SdNotifyModeValues) {
+		return errors.Wrapf(ErrInvalidSpecConfig, "--sdnotify values must be one of %q", strings.Join(SdNotifyModeValues, ", "))
+	}
+
+	//
+	// ContainerStorageConfig
+	//
+	// rootfs and image cannot both be set
+	if len(s.ContainerStorageConfig.Image) > 0 && len(s.ContainerStorageConfig.Rootfs) > 0 {
+		return exclusiveOptions("rootfs", "image")
+	}
+	// imagevolumemode must be one of ignore, tmpfs, or anonymous if given
+	if len(s.ContainerStorageConfig.ImageVolumeMode) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerStorageConfig.ImageVolumeMode), ImageVolumeModeValues) {
+		return errors.Errorf("invalid ImageVolumeMode %q, value must be one of %s",
+			s.ContainerStorageConfig.ImageVolumeMode, strings.Join(ImageVolumeModeValues, ","))
+	}
+	// shmsize conflicts with IPC namespace
+	if s.ContainerStorageConfig.ShmSize != nil && (s.ContainerStorageConfig.IpcNS.IsHost() || s.ContainerStorageConfig.IpcNS.IsNone()) {
+		return errors.Errorf("cannot set shmsize when running in the %s IPC Namespace", s.ContainerStorageConfig.IpcNS)
+	}
+
+	//
+	// ContainerSecurityConfig
+	//
+	// userns and idmappings conflict
+	if s.UserNS.IsPrivate() && s.IDMappings == nil {
+		return errors.Wrap(ErrInvalidSpecConfig, "IDMappings are required when a private user namespace is requested")
+	}
+
+	//
+	// ContainerCgroupConfig
+	//
+	//
+	// None for now
+
+	//
+	// ContainerNetworkConfig
+	//
+	// UseImageResolvConf conflicts with DNSServers, DNSSearch, DNSOptions
+	if s.UseImageResolvConf {
+		if len(s.DNSServers) > 0 {
+			return exclusiveOptions("UseImageResolvConf", "DNSServer")
+		}
+		if len(s.DNSSearch) > 0 {
+			return exclusiveOptions("UseImageResolvConf", "DNSSearch")
+		}
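+		// (DNSServers, DNSSearch and DNSOptions map to resolv.conf
+		// `nameserver`, `search` and `options` entries; each of them
+		// conflicts with keeping the image's own resolv.conf.)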
+ if len(s.DNSOptions) > 0 { + return exclusiveOptions("UseImageResolvConf", "DNSOption") + } + } + // UseImageHosts and HostAdd are exclusive + if s.UseImageHosts && len(s.HostAdd) > 0 { + return exclusiveOptions("UseImageHosts", "HostAdd") + } + + // TODO the specgen does not appear to handle this? Should it + // switch config.Cgroup.Cgroups { + // case "disabled": + // if addedResources { + // return errors.New("cannot specify resource limits when cgroups are disabled is specified") + // } + // configSpec.Linux.Resources = &spec.LinuxResources{} + // case "enabled", "no-conmon", "": + // // Do nothing + // default: + // return errors.New("unrecognized option for cgroups; supported are 'default', 'disabled', 'no-conmon'") + // } + invalidUlimitFormatError := errors.New("invalid default ulimit definition must be form of type=soft:hard") + // set ulimits if not rootless + if len(s.ContainerResourceConfig.Rlimits) < 1 && !rootless.IsRootless() { + // Containers common defines this as something like nproc=4194304:4194304 + tmpnproc := containerConfig.Ulimits() + var posixLimits []specs.POSIXRlimit + for _, limit := range tmpnproc { + limitSplit := strings.SplitN(limit, "=", 2) + if len(limitSplit) < 2 { + return errors.Wrapf(invalidUlimitFormatError, "missing = in %s", limit) + } + valueSplit := strings.SplitN(limitSplit[1], ":", 2) + if len(valueSplit) < 2 { + return errors.Wrapf(invalidUlimitFormatError, "missing : in %s", limit) + } + hard, err := strconv.Atoi(valueSplit[0]) + if err != nil { + return err + } + soft, err := strconv.Atoi(valueSplit[1]) + if err != nil { + return err + } + posixLimit := specs.POSIXRlimit{ + Type: limitSplit[0], + Hard: uint64(hard), + Soft: uint64(soft), + } + posixLimits = append(posixLimits, posixLimit) + } + s.ContainerResourceConfig.Rlimits = posixLimits + } + // Namespaces + if err := s.UtsNS.validate(); err != nil { + return err + } + if err := validateIPCNS(&s.IpcNS); err != nil { + return err + } + if err := s.PidNS.validate(); err != nil { + return err + } + if err := s.CgroupNS.validate(); err != nil { + return err + } + if err := validateUserNS(&s.UserNS); err != nil { + return err + } + + // Set defaults if network info is not provided + // when we are rootless we default to slirp4netns + if s.NetNS.IsPrivate() || s.NetNS.IsDefault() { + if rootless.IsRootless() { + s.NetNS.NSMode = Slirp + } else { + s.NetNS.NSMode = Bridge + } + } + if err := validateNetNS(&s.NetNS); err != nil { + return err + } + if s.NetNS.NSMode != Bridge && len(s.Networks) > 0 { + // Note that we also get the ip and mac in the networks map + return errors.New("Networks and static ip/mac address can only be used with Bridge mode networking") + } + + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/namespaces.go b/vendor/github.com/containers/podman/v4/pkg/specgen/namespaces.go new file mode 100644 index 00000000000..f1343f6e206 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/namespaces.go @@ -0,0 +1,538 @@ +package specgen + +import ( + "fmt" + "net" + "os" + "strings" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/cgroups" + cutil "github.com/containers/common/pkg/util" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/util" + "github.com/containers/storage" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-tools/generate" + "github.com/pkg/errors" +) + +type NamespaceMode string + +const ( + // 
Default indicates the spec generator should determine
+	// a sane default
+	Default NamespaceMode = "default"
+	// Host means the namespace is derived from
+	// the host
+	Host NamespaceMode = "host"
+	// Path is the path to a namespace
+	Path NamespaceMode = "path"
+	// FromContainer means the namespace is derived from a
+	// different container
+	FromContainer NamespaceMode = "container"
+	// FromPod indicates the namespace is derived from a pod
+	FromPod NamespaceMode = "pod"
+	// Private indicates the namespace is private
+	Private NamespaceMode = "private"
+	// Shareable indicates the namespace is shareable
+	Shareable NamespaceMode = "shareable"
+	// None indicates the IPC namespace is created without mounting /dev/shm
+	None NamespaceMode = "none"
+	// NoNetwork indicates no network namespace should
+	// be joined. loopback should still exist.
+	// Only used with the network namespace, invalid otherwise.
+	NoNetwork NamespaceMode = "none"
+	// Bridge indicates that a CNI network stack
+	// should be used.
+	// Only used with the network namespace, invalid otherwise.
+	Bridge NamespaceMode = "bridge"
+	// Slirp indicates that a slirp4netns network stack should
+	// be used.
+	// Only used with the network namespace, invalid otherwise.
+	Slirp NamespaceMode = "slirp4netns"
+	// KeepID indicates a user namespace to keep the owner uid inside
+	// of the namespace itself.
+	// Only used with the user namespace, invalid otherwise.
+	KeepID NamespaceMode = "keep-id"
+	// NoMap indicates a user namespace to keep the owner uid out
+	// of the namespace itself.
+	// Only used with the user namespace, invalid otherwise.
+	NoMap NamespaceMode = "no-map"
+	// Auto indicates to automatically create a user namespace.
+	// Only used with the user namespace, invalid otherwise.
+	Auto NamespaceMode = "auto"
+
+	// DefaultKernelNamespaces is a comma-separated list of default kernel
+	// namespaces.
+	DefaultKernelNamespaces = "ipc,net,uts"
+)
+
+// Namespace describes the namespace
+type Namespace struct {
+	NSMode NamespaceMode `json:"nsmode,omitempty"`
+	Value  string        `json:"value,omitempty"`
+}
+
+// IsDefault returns whether the namespace is set to the default setting (which
+// also includes the empty string).
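+//
+// For example, the zero value counts as default:
+//
+//	(&Namespace{}).IsDefault() // true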
+func (n *Namespace) IsDefault() bool {
+	return n.NSMode == Default || n.NSMode == ""
+}
+
+// IsHost returns a bool if the namespace is host based
+func (n *Namespace) IsHost() bool {
+	return n.NSMode == Host
+}
+
+// IsNone returns a bool if the namespace is set to none
+func (n *Namespace) IsNone() bool {
+	return n.NSMode == None
+}
+
+// IsBridge returns a bool if the namespace is a Bridge
+func (n *Namespace) IsBridge() bool {
+	return n.NSMode == Bridge
+}
+
+// IsPath indicates via bool if the namespace is based on a path
+func (n *Namespace) IsPath() bool {
+	return n.NSMode == Path
+}
+
+// IsContainer indicates via bool if the namespace is based on a container
+func (n *Namespace) IsContainer() bool {
+	return n.NSMode == FromContainer
+}
+
+// IsPod indicates via bool if the namespace is based on a pod
+func (n *Namespace) IsPod() bool {
+	return n.NSMode == FromPod
+}
+
+// IsPrivate indicates the namespace is private
+func (n *Namespace) IsPrivate() bool {
+	return n.NSMode == Private
+}
+
+// IsAuto indicates the namespace is auto
+func (n *Namespace) IsAuto() bool {
+	return n.NSMode == Auto
+}
+
+// IsKeepID indicates the namespace is KeepID
+func (n *Namespace) IsKeepID() bool {
+	return n.NSMode == KeepID
+}
+
+// IsNoMap indicates the namespace is NoMap
+func (n *Namespace) IsNoMap() bool {
+	return n.NSMode == NoMap
+}
+
+func (n *Namespace) String() string {
+	if n.Value != "" {
+		return fmt.Sprintf("%s:%s", n.NSMode, n.Value)
+	}
+	return string(n.NSMode)
+}
+
+func validateUserNS(n *Namespace) error {
+	if n == nil {
+		return nil
+	}
+	switch n.NSMode {
+	case Auto, KeepID, NoMap:
+		return nil
+	}
+	return n.validate()
+}
+
+func validateNetNS(n *Namespace) error {
+	if n == nil {
+		return nil
+	}
+	switch n.NSMode {
+	case Slirp:
+		break
+	case "", Default, Host, Path, FromContainer, FromPod, Private, NoNetwork, Bridge:
+		break
+	default:
+		return errors.Errorf("invalid network %q", n.NSMode)
+	}
+
+	// Path and From Container MUST have a string value set
+	if n.NSMode == Path || n.NSMode == FromContainer {
+		if len(n.Value) < 1 {
+			return errors.Errorf("namespace mode %s requires a value", n.NSMode)
+		}
+	} else if n.NSMode != Slirp {
+		// All others except Slirp must NOT set a string value
+		if len(n.Value) > 0 {
+			return errors.Errorf("namespace value %s cannot be provided with namespace mode %s", n.Value, n.NSMode)
+		}
+	}
+
+	return nil
+}
+
+func validateIPCNS(n *Namespace) error {
+	if n == nil {
+		return nil
+	}
+	switch n.NSMode {
+	case Shareable, None:
+		return nil
+	}
+	return n.validate()
+}
+
+// validate performs simple validation on the namespace to make sure it is not
+// invalid from the get-go
+func (n *Namespace) validate() error {
+	if n == nil {
+		return nil
+	}
+	switch n.NSMode {
+	case "", Default, Host, Path, FromContainer, FromPod, Private:
+		// Valid, do nothing
+	case NoNetwork, Bridge, Slirp:
+		return errors.Errorf("cannot use network modes with non-network namespace")
+	default:
+		return errors.Errorf("invalid namespace type %s specified", n.NSMode)
+	}
+
+	// Path and From Container MUST have a string value set
+	if n.NSMode == Path || n.NSMode == FromContainer {
+		if len(n.Value) < 1 {
+			return errors.Errorf("namespace mode %s requires a value", n.NSMode)
+		}
+	} else {
+		// All others must NOT set a string value
+		if len(n.Value) > 0 {
+			return errors.Errorf("namespace value %s cannot be provided with namespace mode %s", n.Value, n.NSMode)
+		}
+	}
+	return nil
+}
+
+// ParseNamespace parses a namespace in string form.
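+// Accepted values are "pod", "host", "private" (or the empty string),
+// "ns:<path>", and "container:<name|id>".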
+// This is not intended for the network namespace, which has a separate
+// function.
+func ParseNamespace(ns string) (Namespace, error) {
+	toReturn := Namespace{}
+	switch {
+	case ns == "pod":
+		toReturn.NSMode = FromPod
+	case ns == "host":
+		toReturn.NSMode = Host
+	case ns == "private", ns == "":
+		toReturn.NSMode = Private
+	case strings.HasPrefix(ns, "ns:"):
+		split := strings.SplitN(ns, ":", 2)
+		if len(split) != 2 {
+			return toReturn, errors.Errorf("must provide a path to a namespace when specifying \"ns:\"")
+		}
+		toReturn.NSMode = Path
+		toReturn.Value = split[1]
+	case strings.HasPrefix(ns, "container:"):
+		split := strings.SplitN(ns, ":", 2)
+		if len(split) != 2 {
+			return toReturn, errors.Errorf("must provide the name or ID of a container when specifying \"container:\"")
+		}
+		toReturn.NSMode = FromContainer
+		toReturn.Value = split[1]
+	default:
+		return toReturn, errors.Errorf("unrecognized namespace mode %s passed", ns)
+	}
+
+	return toReturn, nil
+}
+
+// ParseCgroupNamespace parses a cgroup namespace specification in string
+// form.
+func ParseCgroupNamespace(ns string) (Namespace, error) {
+	toReturn := Namespace{}
+	// Cgroup is host for v1, private for v2.
+	// We can't trust c/common for this, as it only assumes private.
+	cgroupsv2, err := cgroups.IsCgroup2UnifiedMode()
+	if err != nil {
+		return toReturn, err
+	}
+	if cgroupsv2 {
+		switch ns {
+		case "host":
+			toReturn.NSMode = Host
+		case "private", "":
+			toReturn.NSMode = Private
+		default:
+			return toReturn, errors.Errorf("unrecognized cgroup namespace mode %s passed", ns)
+		}
+	} else {
+		toReturn.NSMode = Host
+	}
+	return toReturn, nil
+}
+
+// ParseIPCNamespace parses an IPC namespace specification in string
+// form.
+func ParseIPCNamespace(ns string) (Namespace, error) {
+	toReturn := Namespace{}
+	switch {
+	case ns == "shareable", ns == "":
+		toReturn.NSMode = Shareable
+		return toReturn, nil
+	case ns == "none":
+		toReturn.NSMode = None
+		return toReturn, nil
+	}
+	return ParseNamespace(ns)
+}
+
+// ParseUserNamespace parses a user namespace specification in string
+// form.
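+// In addition to the modes handled by ParseNamespace, it accepts "auto",
+// "auto:<options>", "keep-id", and "nomap"; an empty string selects the host
+// namespace.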
+func ParseUserNamespace(ns string) (Namespace, error) { + toReturn := Namespace{} + switch { + case ns == "auto": + toReturn.NSMode = Auto + return toReturn, nil + case strings.HasPrefix(ns, "auto:"): + split := strings.SplitN(ns, ":", 2) + if len(split) != 2 { + return toReturn, errors.Errorf("invalid setting for auto: mode") + } + toReturn.NSMode = Auto + toReturn.Value = split[1] + return toReturn, nil + case ns == "keep-id": + toReturn.NSMode = KeepID + return toReturn, nil + case ns == "nomap": + toReturn.NSMode = NoMap + return toReturn, nil + case ns == "": + toReturn.NSMode = Host + return toReturn, nil + } + return ParseNamespace(ns) +} + +// ParseNetworkFlag parses a network string slice into the network options +// If the input is nil or empty it will use the default setting from containers.conf +func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetworkOptions, map[string][]string, error) { + var networkOptions map[string][]string + // by default we try to use the containers.conf setting + // if we get at least one value use this instead + ns := containerConfig.Containers.NetNS + if len(networks) > 0 { + ns = networks[0] + } + + toReturn := Namespace{} + podmanNetworks := make(map[string]types.PerNetworkOptions) + + switch { + case ns == string(Slirp), strings.HasPrefix(ns, string(Slirp)+":"): + parts := strings.SplitN(ns, ":", 2) + if len(parts) > 1 { + networkOptions = make(map[string][]string) + networkOptions[parts[0]] = strings.Split(parts[1], ",") + } + toReturn.NSMode = Slirp + case ns == string(FromPod): + toReturn.NSMode = FromPod + case ns == "" || ns == string(Default) || ns == string(Private): + toReturn.NSMode = Private + case ns == string(Bridge), strings.HasPrefix(ns, string(Bridge)+":"): + toReturn.NSMode = Bridge + parts := strings.SplitN(ns, ":", 2) + netOpts := types.PerNetworkOptions{} + if len(parts) > 1 { + var err error + netOpts, err = parseBridgeNetworkOptions(parts[1]) + if err != nil { + return toReturn, nil, nil, err + } + } + // we have to set the special default network name here + podmanNetworks["default"] = netOpts + + case ns == string(NoNetwork): + toReturn.NSMode = NoNetwork + case ns == string(Host): + toReturn.NSMode = Host + case strings.HasPrefix(ns, "ns:"): + split := strings.SplitN(ns, ":", 2) + if len(split) != 2 { + return toReturn, nil, nil, errors.Errorf("must provide a path to a namespace when specifying \"ns:\"") + } + toReturn.NSMode = Path + toReturn.Value = split[1] + case strings.HasPrefix(ns, string(FromContainer)+":"): + split := strings.SplitN(ns, ":", 2) + if len(split) != 2 { + return toReturn, nil, nil, errors.Errorf("must provide name or ID or a container when specifying \"container:\"") + } + toReturn.NSMode = FromContainer + toReturn.Value = split[1] + default: + // we should have a normal network + parts := strings.SplitN(ns, ":", 2) + if len(parts) == 1 { + // Assume we have been given a comma separated list of networks for backwards compat. 
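+			// e.g. "net1,net2" (illustrative names) joins both networks with
+			// default PerNetworkOptions.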
+ networkList := strings.Split(ns, ",") + for _, net := range networkList { + podmanNetworks[net] = types.PerNetworkOptions{} + } + } else { + if parts[0] == "" { + return toReturn, nil, nil, errors.New("network name cannot be empty") + } + netOpts, err := parseBridgeNetworkOptions(parts[1]) + if err != nil { + return toReturn, nil, nil, errors.Wrapf(err, "invalid option for network %s", parts[0]) + } + podmanNetworks[parts[0]] = netOpts + } + + // networks need bridge mode + toReturn.NSMode = Bridge + } + + if len(networks) > 1 { + if !toReturn.IsBridge() { + return toReturn, nil, nil, errors.Wrapf(define.ErrInvalidArg, "cannot set multiple networks without bridge network mode, selected mode %s", toReturn.NSMode) + } + + for _, network := range networks[1:] { + parts := strings.SplitN(network, ":", 2) + if parts[0] == "" { + return toReturn, nil, nil, errors.Wrapf(define.ErrInvalidArg, "network name cannot be empty") + } + if cutil.StringInSlice(parts[0], []string{string(Bridge), string(Slirp), string(FromPod), string(NoNetwork), + string(Default), string(Private), string(Path), string(FromContainer), string(Host)}) { + return toReturn, nil, nil, errors.Wrapf(define.ErrInvalidArg, "can only set extra network names, selected mode %s conflicts with bridge", parts[0]) + } + netOpts := types.PerNetworkOptions{} + if len(parts) > 1 { + var err error + netOpts, err = parseBridgeNetworkOptions(parts[1]) + if err != nil { + return toReturn, nil, nil, errors.Wrapf(err, "invalid option for network %s", parts[0]) + } + } + podmanNetworks[parts[0]] = netOpts + } + } + + return toReturn, podmanNetworks, networkOptions, nil +} + +func parseBridgeNetworkOptions(opts string) (types.PerNetworkOptions, error) { + netOpts := types.PerNetworkOptions{} + if len(opts) == 0 { + return netOpts, nil + } + allopts := strings.Split(opts, ",") + for _, opt := range allopts { + split := strings.SplitN(opt, "=", 2) + switch split[0] { + case "ip", "ip6": + ip := net.ParseIP(split[1]) + if ip == nil { + return netOpts, errors.Errorf("invalid ip address %q", split[1]) + } + netOpts.StaticIPs = append(netOpts.StaticIPs, ip) + + case "mac": + mac, err := net.ParseMAC(split[1]) + if err != nil { + return netOpts, err + } + netOpts.StaticMAC = types.HardwareAddr(mac) + + case "alias": + if split[1] == "" { + return netOpts, errors.New("alias cannot be empty") + } + netOpts.Aliases = append(netOpts.Aliases, split[1]) + + case "interface_name": + if split[1] == "" { + return netOpts, errors.New("interface_name cannot be empty") + } + netOpts.InterfaceName = split[1] + + default: + return netOpts, errors.Errorf("unknown bridge network option: %s", split[0]) + } + } + return netOpts, nil +} + +func SetupUserNS(idmappings *storage.IDMappingOptions, userns Namespace, g *generate.Generator) (string, error) { + // User + var user string + switch userns.NSMode { + case Path: + if _, err := os.Stat(userns.Value); err != nil { + return user, errors.Wrap(err, "cannot find specified user namespace path") + } + if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), userns.Value); err != nil { + return user, err + } + // runc complains if no mapping is specified, even if we join another ns. 
So provide a dummy mapping + g.AddLinuxUIDMapping(uint32(0), uint32(0), uint32(1)) + g.AddLinuxGIDMapping(uint32(0), uint32(0), uint32(1)) + case Host: + if err := g.RemoveLinuxNamespace(string(spec.UserNamespace)); err != nil { + return user, err + } + case KeepID: + mappings, uid, gid, err := util.GetKeepIDMapping() + if err != nil { + return user, err + } + idmappings = mappings + g.SetProcessUID(uint32(uid)) + g.SetProcessGID(uint32(gid)) + user = fmt.Sprintf("%d:%d", uid, gid) + if err := privateUserNamespace(idmappings, g); err != nil { + return user, err + } + case NoMap: + mappings, uid, gid, err := util.GetNoMapMapping() + if err != nil { + return user, err + } + idmappings = mappings + g.SetProcessUID(uint32(uid)) + g.SetProcessGID(uint32(gid)) + user = fmt.Sprintf("%d:%d", uid, gid) + if err := privateUserNamespace(idmappings, g); err != nil { + return user, err + } + case Private: + if err := privateUserNamespace(idmappings, g); err != nil { + return user, err + } + } + return user, nil +} + +func privateUserNamespace(idmappings *storage.IDMappingOptions, g *generate.Generator) error { + if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), ""); err != nil { + return err + } + if idmappings == nil || (len(idmappings.UIDMap) == 0 && len(idmappings.GIDMap) == 0) { + return errors.Errorf("must provide at least one UID or GID mapping to configure a user namespace") + } + for _, uidmap := range idmappings.UIDMap { + g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size)) + } + for _, gidmap := range idmappings.GIDMap { + g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size)) + } + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/pod_validate.go b/vendor/github.com/containers/podman/v4/pkg/specgen/pod_validate.go new file mode 100644 index 00000000000..8d971a25e20 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/pod_validate.go @@ -0,0 +1,88 @@ +package specgen + +import ( + "github.com/containers/podman/v4/pkg/util" + "github.com/pkg/errors" +) + +var ( + // ErrInvalidPodSpecConfig describes an error given when the podspecgenerator is invalid + ErrInvalidPodSpecConfig = errors.New("invalid pod spec") + // containerConfig has the default configurations defined in containers.conf + containerConfig = util.DefaultContainerConfig() +) + +func exclusivePodOptions(opt1, opt2 string) error { + return errors.Wrapf(ErrInvalidPodSpecConfig, "%s and %s are mutually exclusive pod options", opt1, opt2) +} + +// Validate verifies the input is valid +func (p *PodSpecGenerator) Validate() error { + // PodBasicConfig + if p.NoInfra { + if len(p.InfraCommand) > 0 { + return exclusivePodOptions("NoInfra", "InfraCommand") + } + if len(p.InfraImage) > 0 { + return exclusivePodOptions("NoInfra", "InfraImage") + } + if len(p.InfraName) > 0 { + return exclusivePodOptions("NoInfra", "InfraName") + } + if len(p.SharedNamespaces) > 0 { + return exclusivePodOptions("NoInfra", "SharedNamespaces") + } + } + + // PodNetworkConfig + if err := validateNetNS(&p.NetNS); err != nil { + return err + } + if p.NoInfra { + if p.NetNS.NSMode != Default && p.NetNS.NSMode != "" { + return errors.New("NoInfra and network modes cannot be used together") + } + // Note that networks might be set when --ip or --mac was set + // so we need to check that no networks are set without the infra + if len(p.Networks) > 0 { + return errors.New("cannot set networks options without infra container") + } 
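+		// The DNS and hosts options below configure the infra container's
+		// resolv.conf and /etc/hosts, so they likewise conflict with NoInfra.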
+		if len(p.DNSOption) > 0 {
+			return exclusivePodOptions("NoInfra", "DNSOption")
+		}
+		if len(p.DNSSearch) > 0 {
+			return exclusivePodOptions("NoInfra", "DNSSearch")
+		}
+		if len(p.DNSServer) > 0 {
+			return exclusivePodOptions("NoInfra", "DNSServer")
+		}
+		if len(p.HostAdd) > 0 {
+			return exclusivePodOptions("NoInfra", "HostAdd")
+		}
+		if p.NoManageResolvConf {
+			return exclusivePodOptions("NoInfra", "NoManageResolvConf")
+		}
+	}
+	if p.NetNS.NSMode != "" && p.NetNS.NSMode != Bridge && p.NetNS.NSMode != Slirp && p.NetNS.NSMode != Default {
+		if len(p.PortMappings) > 0 {
+			return errors.New("PortMappings can only be used with Bridge or slirp4netns networking")
+		}
+	}
+
+	if p.NoManageResolvConf {
+		if len(p.DNSServer) > 0 {
+			return exclusivePodOptions("NoManageResolvConf", "DNSServer")
+		}
+		if len(p.DNSSearch) > 0 {
+			return exclusivePodOptions("NoManageResolvConf", "DNSSearch")
+		}
+		if len(p.DNSOption) > 0 {
+			return exclusivePodOptions("NoManageResolvConf", "DNSOption")
+		}
+	}
+	if p.NoManageHosts && len(p.HostAdd) > 0 {
+		return exclusivePodOptions("NoManageHosts", "HostAdd")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/podspecgen.go b/vendor/github.com/containers/podman/v4/pkg/specgen/podspecgen.go
new file mode 100644
index 00000000000..ad9414f67b8
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/specgen/podspecgen.go
@@ -0,0 +1,230 @@
+package specgen
+
+import (
+	"net"
+
+	"github.com/containers/common/libnetwork/types"
+	storageTypes "github.com/containers/storage/types"
+	spec "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// PodBasicConfig contains basic configuration options for pods.
+type PodBasicConfig struct {
+	// Name is the name of the pod.
+	// If not provided, a name will be generated when the pod is created.
+	// Optional.
+	Name string `json:"name,omitempty"`
+	// Hostname is the pod's hostname. If not set, the name of the pod will
+	// be used (if a name was not provided here, the name auto-generated for
+	// the pod will be used). This will be used by the infra container and
+	// all containers in the pod as long as the UTS namespace is shared.
+	// Optional.
+	Hostname string `json:"hostname,omitempty"`
+	// Labels are key-value pairs that are used to add metadata to pods.
+	// Optional.
+	Labels map[string]string `json:"labels,omitempty"`
+	// NoInfra tells the pod not to create an infra container. If this is
+	// done, many networking-related options will become unavailable.
+	// Conflicts with setting any options in PodNetworkConfig, and the
+	// InfraCommand and InfraImages in this struct.
+	// Optional.
+	NoInfra bool `json:"no_infra,omitempty"`
+	// InfraConmonPidFile is a custom path to store the infra container's
+	// conmon PID.
+	InfraConmonPidFile string `json:"infra_conmon_pid_file,omitempty"`
+	// InfraCommand sets the command that will be used to start the infra
+	// container.
+	// If not set, the default set in the Libpod configuration file will be
+	// used.
+	// Conflicts with NoInfra=true.
+	// Optional.
+	InfraCommand []string `json:"infra_command,omitempty"`
+	// InfraImage is the image that will be used for the infra container.
+	// If not set, the default set in the Libpod configuration file will be
+	// used.
+	// Conflicts with NoInfra=true.
+	// Optional.
+	InfraImage string `json:"infra_image,omitempty"`
+	// InfraName is the name that will be used for the infra container.
+	// If not set, the default set in the Libpod configuration file will be
+	// used.
+ // Conflicts with NoInfra=true. + // Optional. + InfraName string `json:"infra_name,omitempty"` + // SharedNamespaces instructs the pod to share a set of namespaces. + // Shared namespaces will be joined (by default) by every container + // which joins the pod. + // If not set and NoInfra is false, the pod will set a default set of + // namespaces to share. + // Conflicts with NoInfra=true. + // Optional. + SharedNamespaces []string `json:"shared_namespaces,omitempty"` + // PodCreateCommand is the command used to create this pod. + // This will be shown in the output of Inspect() on the pod, and may + // also be used by some tools that wish to recreate the pod + // (e.g. `podman generate systemd --new`). + // Optional. + // ShareParent determines if all containers in the pod will share the pod's cgroup as the cgroup parent + ShareParent *bool `json:"share_parent,omitempty"` + PodCreateCommand []string `json:"pod_create_command,omitempty"` + // Pid sets the process id namespace of the pod + // Optional (defaults to private if unset). This sets the PID namespace of the infra container + // This configuration will then be shared with the entire pod if PID namespace sharing is enabled via --share + Pid Namespace `json:"pidns,omitempty"` + // Userns is used to indicate which kind of Usernamespace to enter. + // Any containers created within the pod will inherit the pod's userns settings. + // Optional + Userns Namespace `json:"userns,omitempty"` + // Devices contains user specified Devices to be added to the Pod + Devices []string `json:"pod_devices,omitempty"` + // Sysctl sets kernel parameters for the pod + Sysctl map[string]string `json:"sysctl,omitempty"` +} + +// PodNetworkConfig contains networking configuration for a pod. +type PodNetworkConfig struct { + // NetNS is the configuration to use for the infra container's network + // namespace. This network will, by default, be shared with all + // containers in the pod. + // Cannot be set to FromContainer and FromPod. + // Setting this to anything except default conflicts with NoInfra=true. + // Defaults to Bridge as root and Slirp as rootless. + // Mandatory. + NetNS Namespace `json:"netns,omitempty"` + // PortMappings is a set of ports to map into the infra container. + // As, by default, containers share their network with the infra + // container, this will forward the ports to the entire pod. + // Only available if NetNS is set to Bridge or Slirp. + // Optional. + PortMappings []types.PortMapping `json:"portmappings,omitempty"` + // Map of networks names to ids the container should join to. + // You can request additional settings for each network, you can + // set network aliases, static ips, static mac address and the + // network interface name for this container on the specific network. + // If the map is empty and the bridge network mode is set the container + // will be joined to the default network. + Networks map[string]types.PerNetworkOptions + // CNINetworks is a list of CNI networks to join the container to. + // If this list is empty, the default CNI network will be joined + // instead. If at least one entry is present, we will not join the + // default network (unless it is part of this list). + // Only available if NetNS is set to bridge. + // Optional. + // Deprecated: as of podman 4.0 use "Networks" instead. + CNINetworks []string `json:"cni_networks,omitempty"` + // NoManageResolvConf indicates that /etc/resolv.conf should not be + // managed by the pod. 
Instead, each container will create and manage a + // separate resolv.conf as if they had not joined a pod. + // Conflicts with NoInfra=true and DNSServer, DNSSearch, DNSOption. + // Optional. + NoManageResolvConf bool `json:"no_manage_resolv_conf,omitempty"` + // DNSServer is a set of DNS servers that will be used in the infra + // container's resolv.conf, which will, by default, be shared with all + // containers in the pod. + // If not provided, the host's DNS servers will be used, unless the only + // server set is a localhost address. As the container cannot connect to + // the host's localhost, a default server will instead be set. + // Conflicts with NoInfra=true. + // Optional. + DNSServer []net.IP `json:"dns_server,omitempty"` + // DNSSearch is a set of DNS search domains that will be used in the + // infra container's resolv.conf, which will, by default, be shared with + // all containers in the pod. + // If not provided, DNS search domains from the host's resolv.conf will + // be used. + // Conflicts with NoInfra=true. + // Optional. + DNSSearch []string `json:"dns_search,omitempty"` + // DNSOption is a set of DNS options that will be used in the infra + // container's resolv.conf, which will, by default, be shared with all + // containers in the pod. + // Conflicts with NoInfra=true. + // Optional. + DNSOption []string `json:"dns_option,omitempty"` + // NoManageHosts indicates that /etc/hosts should not be managed by the + // pod. Instead, each container will create a separate /etc/hosts as + // they would if not in a pod. + // Conflicts with HostAdd. + NoManageHosts bool `json:"no_manage_hosts,omitempty"` + // HostAdd is a set of hosts that will be added to the infra container's + // /etc/hosts that will, by default, be shared with all containers in + // the pod. + // Conflicts with NoInfra=true and NoManageHosts. + // Optional. + HostAdd []string `json:"hostadd,omitempty"` + // NetworkOptions are additional options for each network + // Optional. + NetworkOptions map[string][]string `json:"network_options,omitempty"` +} + +// PodStorageConfig contains all of the storage related options for the pod and its infra container. +type PodStorageConfig struct { + // Mounts are mounts that will be added to the pod. + // These will supersede Image Volumes and VolumesFrom volumes where + // there are conflicts. + // Optional. + Mounts []spec.Mount `json:"mounts,omitempty"` + // Volumes are named volumes that will be added to the pod. + // These will supersede Image Volumes and VolumesFrom volumes where + // there are conflicts. + // Optional. + Volumes []*NamedVolume `json:"volumes,omitempty"` + // Overlay volumes are named volumes that will be added to the pod. + // Optional. + OverlayVolumes []*OverlayVolume `json:"overlay_volumes,omitempty"` + // Image volumes bind-mount a container-image mount into the pod's infra container. + // Optional. + ImageVolumes []*ImageVolume `json:"image_volumes,omitempty"` + // VolumesFrom is a set of containers whose volumes will be added to + // this pod. The name or ID of the container must be provided, and + // may optionally be followed by a : and then one or more + // comma-separated options. Valid options are 'ro', 'rw', and 'z'. + // Options will be used for all volumes sourced from the container. + VolumesFrom []string `json:"volumes_from,omitempty"` +} + +// PodCgroupConfig contains configuration options about a pod's cgroups. +// This will be expanded in future updates to pods. 
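+// For now it only carries the pod's cgroup parent.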
+type PodCgroupConfig struct { + // CgroupParent is the parent for the Cgroup that the pod will create. + // This pod cgroup will, in turn, be the default cgroup parent for all + // containers in the pod. + // Optional. + CgroupParent string `json:"cgroup_parent,omitempty"` +} + +// PodSpecGenerator describes options to create a pod +// swagger:model PodSpecGenerator +type PodSpecGenerator struct { + PodBasicConfig + PodNetworkConfig + PodCgroupConfig + PodResourceConfig + PodStorageConfig + PodSecurityConfig + InfraContainerSpec *SpecGenerator `json:"-"` +} + +type PodResourceConfig struct { + // ResourceLimits contains linux specific CPU data for the pod + ResourceLimits *spec.LinuxResources `json:"resource_limits,omitempty"` + // CPU period of the cpuset, determined by --cpus + CPUPeriod uint64 `json:"cpu_period,omitempty"` + // CPU quota of the cpuset, determined by --cpus + CPUQuota int64 `json:"cpu_quota,omitempty"` + // ThrottleReadBpsDevice contains the rate at which the devices in the pod can be read from/accessed + ThrottleReadBpsDevice map[string]spec.LinuxThrottleDevice `json:"throttleReadBpsDevice,omitempty"` +} + +type PodSecurityConfig struct { + SecurityOpt []string `json:"security_opt,omitempty"` + // IDMappings are UID and GID mappings that will be used by user + // namespaces. + // Required if UserNS is private. + IDMappings *storageTypes.IDMappingOptions `json:"idmappings,omitempty"` +} + +// NewPodSpecGenerator creates a new pod spec +func NewPodSpecGenerator() *PodSpecGenerator { + return &PodSpecGenerator{} +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go b/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go new file mode 100644 index 00000000000..79e20667b12 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go @@ -0,0 +1,597 @@ +package specgen + +import ( + "net" + "strings" + "syscall" + + "github.com/containers/common/libimage" + nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/image/v5/manifest" + "github.com/containers/storage/types" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// LogConfig describes the logging characteristics for a container +type LogConfig struct { + // LogDriver is the container's log driver. + // Optional. + Driver string `json:"driver,omitempty"` + // LogPath is the path the container's logs will be stored at. + // Only available if LogDriver is set to "json-file" or "k8s-file". + // Optional. + Path string `json:"path,omitempty"` + // Size is the maximum size of the log file + // Optional. + Size int64 `json:"size,omitempty"` + // A set of options to accompany the log driver. + // Optional. + Options map[string]string `json:"options,omitempty"` +} + +// ContainerBasicConfig contains the basic parts of a container. +type ContainerBasicConfig struct { + // Name is the name the container will be given. + // If no name is provided, one will be randomly generated. + // Optional. + Name string `json:"name,omitempty"` + // Pod is the ID of the pod the container will join. + // Optional. + Pod string `json:"pod,omitempty"` + // Entrypoint is the container's entrypoint. + // If not given and Image is specified, this will be populated by the + // image's configuration. + // Optional. + Entrypoint []string `json:"entrypoint,omitempty"` + // Command is the container's command. + // If not given and Image is specified, this will be populated by the + // image's configuration. + // Optional. 
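+	// (Entrypoint corresponds to the image's ENTRYPOINT, Command to its CMD;
+	// this mapping is the usual OCI image convention, noted here for clarity.)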
+ Command []string `json:"command,omitempty"` + // EnvHost indicates that the host environment should be added to container + // Optional. + EnvHost bool `json:"env_host,omitempty"` + // EnvHTTPProxy indicates that the http host proxy environment variables + // should be added to container + // Optional. + HTTPProxy bool `json:"httpproxy,omitempty"` + // Env is a set of environment variables that will be set in the + // container. + // Optional. + Env map[string]string `json:"env,omitempty"` + // Terminal is whether the container will create a PTY. + // Optional. + Terminal bool `json:"terminal,omitempty"` + // Stdin is whether the container will keep its STDIN open. + Stdin bool `json:"stdin,omitempty"` + // Labels are key-value pairs that are used to add metadata to + // containers. + // Optional. + Labels map[string]string `json:"labels,omitempty"` + // Annotations are key-value options passed into the container runtime + // that can be used to trigger special behavior. + // Optional. + Annotations map[string]string `json:"annotations,omitempty"` + // StopSignal is the signal that will be used to stop the container. + // Must be a non-zero integer below SIGRTMAX. + // If not provided, the default, SIGTERM, will be used. + // Will conflict with Systemd if Systemd is set to "true" or "always". + // Optional. + StopSignal *syscall.Signal `json:"stop_signal,omitempty"` + // StopTimeout is a timeout between the container's stop signal being + // sent and SIGKILL being sent. + // If not provided, the default will be used. + // If 0 is used, stop signal will not be sent, and SIGKILL will be sent + // instead. + // Optional. + StopTimeout *uint `json:"stop_timeout,omitempty"` + // Timeout is a maximum time in seconds the container will run before + // main process is sent SIGKILL. + // If 0 is used, signal will not be sent. Container can run indefinitely + // Optional. + Timeout uint `json:"timeout,omitempty"` + // LogConfiguration describes the logging for a container including + // driver, path, and options. + // Optional + LogConfiguration *LogConfig `json:"log_configuration,omitempty"` + // ConmonPidFile is a path at which a PID file for Conmon will be + // placed. + // If not given, a default location will be used. + // Optional. + ConmonPidFile string `json:"conmon_pid_file,omitempty"` + // RawImageName is the user-specified and unprocessed input referring + // to a local or a remote image. + RawImageName string `json:"raw_image_name,omitempty"` + // RestartPolicy is the container's restart policy - an action which + // will be taken when the container exits. + // If not given, the default policy, which does nothing, will be used. + // Optional. + RestartPolicy string `json:"restart_policy,omitempty"` + // RestartRetries is the number of attempts that will be made to restart + // the container. + // Only available when RestartPolicy is set to "on-failure". + // Optional. + RestartRetries *uint `json:"restart_tries,omitempty"` + // OCIRuntime is the name of the OCI runtime that will be used to create + // the container. + // If not specified, the default will be used. + // Optional. + OCIRuntime string `json:"oci_runtime,omitempty"` + // Systemd is whether the container will be started in systemd mode. + // Valid options are "true", "false", and "always". + // "true" enables this mode only if the binary run in the container is + // /sbin/init or systemd. "always" unconditionally enables systemd mode. + // "false" unconditionally disables systemd mode. 
+ // If enabled, mounts and stop signal will be modified. + // If set to "always" or set to "true" and conditionally triggered, + // conflicts with StopSignal. + // If not specified, "false" will be assumed. + // Optional. + Systemd string `json:"systemd,omitempty"` + // Determine how to handle the NOTIFY_SOCKET - do we participate or pass it through + // "container" - let the OCI runtime deal with it, advertise conmon's MAINPID + // "conmon-only" - advertise conmon's MAINPID, send READY when started, don't pass to OCI + // "ignore" - unset NOTIFY_SOCKET + SdNotifyMode string `json:"sdnotifyMode,omitempty"` + // Namespace is the libpod namespace the container will be placed in. + // Optional. + Namespace string `json:"namespace,omitempty"` + // PidNS is the container's PID namespace. + // It defaults to private. + // Mandatory. + PidNS Namespace `json:"pidns,omitempty"` + // UtsNS is the container's UTS namespace. + // It defaults to private. + // Must be set to Private to set Hostname. + // Mandatory. + UtsNS Namespace `json:"utsns,omitempty"` + // Hostname is the container's hostname. If not set, the hostname will + // not be modified (if UtsNS is not private) or will be set to the + // container ID (if UtsNS is private). + // Conflicts with UtsNS if UtsNS is not set to private. + // Optional. + Hostname string `json:"hostname,omitempty"` + // HostUses is a list of host usernames or UIDs to add to the container + // /etc/passwd file + HostUsers []string `json:"hostusers,omitempty"` + // Sysctl sets kernel parameters for the container + Sysctl map[string]string `json:"sysctl,omitempty"` + // Remove indicates if the container should be removed once it has been started + // and exits + Remove bool `json:"remove,omitempty"` + // ContainerCreateCommand is the command that was used to create this + // container. + // This will be shown in the output of Inspect() on the container, and + // may also be used by some tools that wish to recreate the container + // (e.g. `podman generate systemd --new`). + // Optional. + ContainerCreateCommand []string `json:"containerCreateCommand,omitempty"` + // PreserveFDs is a number of additional file descriptors (in addition + // to 0, 1, 2) that will be passed to the executed process. The total FDs + // passed will be 3 + PreserveFDs. + // set tags as `json:"-"` for not supported remote + // Optional. + PreserveFDs uint `json:"-"` + // Timezone is the timezone inside the container. + // Local means it has the same timezone as the host machine + // Optional. + Timezone string `json:"timezone,omitempty"` + // DependencyContainers is an array of containers this container + // depends on. Dependency containers must be started before this + // container. Dependencies can be specified by name or full/partial ID. + // Optional. + DependencyContainers []string `json:"dependencyContainers,omitempty"` + // PidFile is the file that saves container process id. + // set tags as `json:"-"` for not supported remote + // Optional. + PidFile string `json:"-"` + // EnvSecrets are secrets that will be set as environment variables + // Optional. + EnvSecrets map[string]string `json:"secret_env,omitempty"` + // InitContainerType describes if this container is an init container + // and if so, what type: always or once + InitContainerType string `json:"init_container_type"` + // Personality allows users to configure different execution domains. + // Execution domains tell Linux how to map signal numbers into signal actions. 
+	// The execution domain system allows Linux to provide limited support
+	// for binaries compiled under other UNIX-like operating systems.
+	Personality *spec.LinuxPersonality `json:"personality,omitempty"`
+	// UnsetEnv unsets the specified default environment variables from the image or from built-in or containers.conf
+	// Optional.
+	UnsetEnv []string `json:"unsetenv,omitempty"`
+	// UnsetEnvAll unsets all default environment variables from the image or from built-in or containers.conf
+	// Optional.
+	UnsetEnvAll bool `json:"unsetenvall,omitempty"`
+	// Passwd is a container run option that determines if we are validating users/groups before running the container
+	Passwd *bool `json:"manage_password,omitempty"`
+	// PasswdEntry specifies arbitrary data to append to a file.
+	PasswdEntry string `json:"passwd_entry,omitempty"`
+}
+
+// ContainerStorageConfig contains information on the storage configuration of a
+// container.
+type ContainerStorageConfig struct {
+	// Image is the image the container will be based on. The image will be
+	// used as the container's root filesystem, and its environment vars,
+	// volumes, and other configuration will be applied to the container.
+	// Conflicts with Rootfs.
+	// At least one of Image or Rootfs must be specified.
+	Image string `json:"image"`
+	// Rootfs is the path to a directory that will be used as the
+	// container's root filesystem. No modification will be made to the
+	// directory, it will be directly mounted into the container as root.
+	// Conflicts with Image.
+	// At least one of Image or Rootfs must be specified.
+	Rootfs string `json:"rootfs,omitempty"`
+	// RootfsOverlay tells whether rootfs is actually an overlay on top of the base path
+	RootfsOverlay bool `json:"rootfs_overlay,omitempty"`
+	// ImageVolumeMode indicates how image volumes will be created.
+	// Supported modes are "ignore" (do not create), "tmpfs" (create as
+	// tmpfs), and "anonymous" (create as anonymous volumes).
+	// The default if unset is anonymous.
+	// Optional.
+	ImageVolumeMode string `json:"image_volume_mode,omitempty"`
+	// VolumesFrom is a set of containers whose volumes will be added to
+	// this container. The name or ID of the container must be provided, and
+	// may optionally be followed by a : and then one or more
+	// comma-separated options. Valid options are 'ro', 'rw', and 'z'.
+	// Options will be used for all volumes sourced from the container.
+	VolumesFrom []string `json:"volumes_from,omitempty"`
+	// Init specifies that an init binary will be mounted into the
+	// container, and will be used as PID1.
+	Init bool `json:"init,omitempty"`
+	// InitPath specifies the path to the init binary that will be added if
+	// Init is specified above. If not specified, the default set in the
+	// Libpod config will be used. Ignored if Init above is not set.
+	// Optional.
+	InitPath string `json:"init_path,omitempty"`
+	// Mounts are mounts that will be added to the container.
+	// These will supersede Image Volumes and VolumesFrom volumes where
+	// there are conflicts.
+	// Optional.
+	Mounts []spec.Mount `json:"mounts,omitempty"`
+	// Volumes are named volumes that will be added to the container.
+	// These will supersede Image Volumes and VolumesFrom volumes where
+	// there are conflicts.
+	// Optional.
+	Volumes []*NamedVolume `json:"volumes,omitempty"`
+	// Overlay volumes are named volumes that will be added to the container.
+	// Optional.
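+	// Writes go to the overlay layer, so the source stays unmodified (the
+	// usual behavior of the ":O" mount option).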
+ OverlayVolumes []*OverlayVolume `json:"overlay_volumes,omitempty"` + // Image volumes bind-mount a container-image mount into the container. + // Optional. + ImageVolumes []*ImageVolume `json:"image_volumes,omitempty"` + // Devices are devices that will be added to the container. + // Optional. + Devices []spec.LinuxDevice `json:"devices,omitempty"` + // DeviceCgroupRule are device cgroup rules that allow containers + // to use additional types of devices. + DeviceCgroupRule []spec.LinuxDeviceCgroup `json:"device_cgroup_rule,omitempty"` + // DevicesFrom is a way to ensure your container inherits device specific information from another container + DevicesFrom []string `json:"devices_from,omitempty"` + // HostDeviceList is used to recreate the mounted device on inherited containers + HostDeviceList []spec.LinuxDevice `json:"host_device_list,omitempty"` + // IpcNS is the container's IPC namespace. + // Default is private. + // Conflicts with ShmSize if not set to private. + // Mandatory. + IpcNS Namespace `json:"ipcns,omitempty"` + // ShmSize is the size of the tmpfs to mount in at /dev/shm, in bytes. + // Conflicts with ShmSize if IpcNS is not private. + // Optional. + ShmSize *int64 `json:"shm_size,omitempty"` + // WorkDir is the container's working directory. + // If unset, the default, /, will be used. + // Optional. + WorkDir string `json:"work_dir,omitempty"` + // Create the working directory if it doesn't exist. + // If unset, it doesn't create it. + // Optional. + CreateWorkingDir bool `json:"create_working_dir,omitempty"` + // StorageOpts is the container's storage options + // Optional. + StorageOpts map[string]string `json:"storage_opts,omitempty"` + // RootfsPropagation is the rootfs propagation mode for the container. + // If not set, the default of rslave will be used. + // Optional. + RootfsPropagation string `json:"rootfs_propagation,omitempty"` + // Secrets are the secrets that will be added to the container + // Optional. + Secrets []Secret `json:"secrets,omitempty"` + // Volatile specifies whether the container storage can be optimized + // at the cost of not syncing all the dirty files in memory. + Volatile bool `json:"volatile,omitempty"` + // ChrootDirs is an additional set of directories that need to be + // treated as root directories. Standard bind mounts will be mounted + // into paths relative to these directories. + ChrootDirs []string `json:"chroot_directories,omitempty"` +} + +// ContainerSecurityConfig is a container's security features, including +// SELinux, Apparmor, and Seccomp. +type ContainerSecurityConfig struct { + // Privileged is whether the container is privileged. + // Privileged does the following: + // - Adds all devices on the system to the container. + // - Adds all capabilities to the container. + // - Disables Seccomp, SELinux, and Apparmor confinement. + // (Though SELinux can be manually re-enabled). + // TODO: this conflicts with things. + // TODO: this does more. + Privileged bool `json:"privileged,omitempty"` + // User is the user the container will be run as. + // Can be given as a UID or a username; if a username, it will be + // resolved within the container, using the container's /etc/passwd. + // If unset, the container will be run as root. + // Optional. + User string `json:"user,omitempty"` + // Groups are a list of supplemental groups the container's user will + // be granted access to. + // Optional. + Groups []string `json:"groups,omitempty"` + // CapAdd are capabilities which will be added to the container. 
+	// Conflicts with Privileged.
+	// Optional.
+	CapAdd []string `json:"cap_add,omitempty"`
+	// CapDrop are capabilities which will be removed from the container.
+	// Conflicts with Privileged.
+	// Optional.
+	CapDrop []string `json:"cap_drop,omitempty"`
+	// SelinuxOpts are the SELinux options the container will use.
+	// If SELinux is enabled and this is not specified, a process label will
+	// be automatically generated.
+	// Optional.
+	SelinuxOpts []string `json:"selinux_opts,omitempty"`
+	// ApparmorProfile is the name of the Apparmor profile the container
+	// will use.
+	// Optional.
+	ApparmorProfile string `json:"apparmor_profile,omitempty"`
+	// SeccompPolicy determines which seccomp profile gets applied to
+	// the container. Valid values: empty, default, image.
+	SeccompPolicy string `json:"seccomp_policy,omitempty"`
+	// SeccompProfilePath is the path to a JSON file containing the
+	// container's Seccomp profile.
+	// If not specified, no Seccomp profile will be used.
+	// Optional.
+	SeccompProfilePath string `json:"seccomp_profile_path,omitempty"`
+	// NoNewPrivileges is whether the container will set the no new
+	// privileges flag on create, which disables gaining additional
+	// privileges (e.g. via setuid) in the container.
+	NoNewPrivileges bool `json:"no_new_privileges,omitempty"`
+	// UserNS is the container's user namespace.
+	// It defaults to host, indicating that no user namespace will be
+	// created.
+	// If set to private, IDMappings must be set.
+	// Mandatory.
+	UserNS Namespace `json:"userns,omitempty"`
+	// IDMappings are UID and GID mappings that will be used by user
+	// namespaces.
+	// Required if UserNS is private.
+	IDMappings *types.IDMappingOptions `json:"idmappings,omitempty"`
+	// ReadOnlyFilesystem indicates that everything will be mounted
+	// as read-only
+	ReadOnlyFilesystem bool `json:"read_only_filesystem,omitempty"`
+	// Umask is the umask the init process of the container will be run with.
+	Umask string `json:"umask,omitempty"`
+	// ProcOpts are the options used for the proc mount.
+	ProcOpts []string `json:"procfs_opts,omitempty"`
+	// Mask is the path we want to mask in the container. This masks the paths
+	// given in addition to the default list.
+	// Optional
+	Mask []string `json:"mask,omitempty"`
+	// Unmask is the path we want to unmask in the container. To override
+	// all the default paths that are masked, set unmask=ALL.
+	Unmask []string `json:"unmask,omitempty"`
+}
+
+// ContainerCgroupConfig contains configuration information about a container's
+// cgroups.
+type ContainerCgroupConfig struct {
+	// CgroupNS is the container's cgroup namespace.
+	// It defaults to private.
+	// Mandatory.
+	CgroupNS Namespace `json:"cgroupns,omitempty"`
+	// CgroupsMode sets a policy for how cgroups will be created in the
+	// container, including the ability to disable creation entirely.
+	CgroupsMode string `json:"cgroups_mode,omitempty"`
+	// CgroupParent is the container's Cgroup parent.
+	// If not set, the default for the current cgroup driver will be used.
+	// Optional.
+	CgroupParent string `json:"cgroup_parent,omitempty"`
+}
+
+// ContainerNetworkConfig contains information on a container's network
+// configuration.
+type ContainerNetworkConfig struct {
+	// NetNS is the configuration to use for the container's network
+	// namespace.
+	// Mandatory.
+	NetNS Namespace `json:"netns,omitempty"`
+	// PortMappings is a set of ports to map into the container.
+	// Only available if NetNS is set to bridge or slirp.
+ // Optional. + PortMappings []nettypes.PortMapping `json:"portmappings,omitempty"` + // PublishExposedPorts will publish ports specified in the image to + // random unused ports (guaranteed to be above 1024) on the host. + // This is based on ports set in Expose below, and any ports specified + // by the Image (if one is given). + // Only available if NetNS is set to Bridge or Slirp. + PublishExposedPorts bool `json:"publish_image_ports,omitempty"` + // Expose is a number of ports that will be forwarded to the container + // if PublishExposedPorts is set. + // Expose is a map of uint16 (port number) to a string representing + // protocol i.e map[uint16]string. Allowed protocols are "tcp", "udp", and "sctp", or some + // combination of the three separated by commas. + // If protocol is set to "" we will assume TCP. + // Only available if NetNS is set to Bridge or Slirp, and + // PublishExposedPorts is set. + // Optional. + Expose map[uint16]string `json:"expose,omitempty"` + // Map of networks names or ids that the container should join. + // You can request additional settings for each network, you can + // set network aliases, static ips, static mac address and the + // network interface name for this container on the specific network. + // If the map is empty and the bridge network mode is set the container + // will be joined to the default network. + Networks map[string]nettypes.PerNetworkOptions + // CNINetworks is a list of CNI networks to join the container to. + // If this list is empty, the default CNI network will be joined + // instead. If at least one entry is present, we will not join the + // default network (unless it is part of this list). + // Only available if NetNS is set to bridge. + // Optional. + // Deprecated: as of podman 4.0 use "Networks" instead. + CNINetworks []string `json:"cni_networks,omitempty"` + // UseImageResolvConf indicates that resolv.conf should not be managed + // by Podman, but instead sourced from the image. + // Conflicts with DNSServer, DNSSearch, DNSOption. + UseImageResolvConf bool `json:"use_image_resolve_conf,omitempty"` + // DNSServers is a set of DNS servers that will be used in the + // container's resolv.conf, replacing the host's DNS Servers which are + // used by default. + // Conflicts with UseImageResolvConf. + // Optional. + DNSServers []net.IP `json:"dns_server,omitempty"` + // DNSSearch is a set of DNS search domains that will be used in the + // container's resolv.conf, replacing the host's DNS search domains + // which are used by default. + // Conflicts with UseImageResolvConf. + // Optional. + DNSSearch []string `json:"dns_search,omitempty"` + // DNSOptions is a set of DNS options that will be used in the + // container's resolv.conf, replacing the host's DNS options which are + // used by default. + // Conflicts with UseImageResolvConf. + // Optional. + DNSOptions []string `json:"dns_option,omitempty"` + // UseImageHosts indicates that /etc/hosts should not be managed by + // Podman, and instead sourced from the image. + // Conflicts with HostAdd. + // Do not set omitempty here, if this is false it should be set to not get + // the server default. + // Ideally this would be a pointer so we could differentiate between an + // explicitly false/true and unset (containers.conf default). However + // specgen is stable so we can not change this right now. + // TODO (5.0): change to pointer + UseImageHosts bool `json:"use_image_hosts"` + // HostAdd is a set of hosts which will be added to the container's + // /etc/hosts file. 
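+	// Entries are typically of the form "hostname:ip" (the --add-host format).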
+	// Conflicts with UseImageHosts.
+	// Optional.
+	HostAdd []string `json:"hostadd,omitempty"`
+	// NetworkOptions are additional options for each network
+	// Optional.
+	NetworkOptions map[string][]string `json:"network_options,omitempty"`
+}
+
+// ContainerResourceConfig contains information on container resource limits.
+type ContainerResourceConfig struct {
+	// ResourceLimits are resource limits to apply to the container.
+	// Can only be set as root on cgroups v1 systems, but can be set as
+	// rootless as well for cgroups v2.
+	// Optional.
+	ResourceLimits *spec.LinuxResources `json:"resource_limits,omitempty"`
+	// Rlimits are POSIX rlimits to apply to the container.
+	// Optional.
+	Rlimits []spec.POSIXRlimit `json:"r_limits,omitempty"`
+	// OOMScoreAdj adjusts the score used by the OOM killer to determine
+	// processes to kill for the container's process.
+	// Optional.
+	OOMScoreAdj *int `json:"oom_score_adj,omitempty"`
+	// Weight per cgroup per device, can override BlkioWeight
+	WeightDevice map[string]spec.LinuxWeightDevice `json:"weightDevice,omitempty"`
+	// IO read rate limit per cgroup per device, bytes per second
+	ThrottleReadBpsDevice map[string]spec.LinuxThrottleDevice `json:"throttleReadBpsDevice,omitempty"`
+	// IO write rate limit per cgroup per device, bytes per second
+	ThrottleWriteBpsDevice map[string]spec.LinuxThrottleDevice `json:"throttleWriteBpsDevice,omitempty"`
+	// IO read rate limit per cgroup per device, IO per second
+	ThrottleReadIOPSDevice map[string]spec.LinuxThrottleDevice `json:"throttleReadIOPSDevice,omitempty"`
+	// IO write rate limit per cgroup per device, IO per second
+	ThrottleWriteIOPSDevice map[string]spec.LinuxThrottleDevice `json:"throttleWriteIOPSDevice,omitempty"`
+	// CgroupConf are key-value options passed into the container runtime
+	// that are used to configure cgroup v2.
+	// Optional.
+	CgroupConf map[string]string `json:"unified,omitempty"`
+	// CPU period of the cpuset, determined by --cpus
+	CPUPeriod uint64 `json:"cpu_period,omitempty"`
+	// CPU quota of the cpuset, determined by --cpus
+	CPUQuota int64 `json:"cpu_quota,omitempty"`
+}
+
+// ContainerHealthCheckConfig describes a container healthcheck with attributes
+// like command, retries, interval, start period, and timeout.
+type ContainerHealthCheckConfig struct {
+	HealthConfig *manifest.Schema2HealthConfig `json:"healthconfig,omitempty"`
+}
+
+// SpecGenerator creates an OCI spec and Libpod configuration options to create
+// a container based on the given configuration.
+// swagger:model SpecGenerator
+type SpecGenerator struct {
+	ContainerBasicConfig
+	ContainerStorageConfig
+	ContainerSecurityConfig
+	ContainerCgroupConfig
+	ContainerNetworkConfig
+	ContainerResourceConfig
+	ContainerHealthCheckConfig
+
+	image             *libimage.Image `json:"-"`
+	resolvedImageName string          `json:"-"`
+}
+
+// SetImage sets the associated image for the generator.
+func (s *SpecGenerator) SetImage(image *libimage.Image, resolvedImageName string) {
+	s.image = image
+	s.resolvedImageName = resolvedImageName
+}
+
+// GetImage returns the associated image and resolved image name for the
+// generator. The image may be nil if no image has been set yet.
+func (s *SpecGenerator) GetImage() (*libimage.Image, string) { + return s.image, s.resolvedImageName +} + +type Secret struct { + Source string + Target string + UID uint32 + GID uint32 + Mode uint32 +} + +var ( + // ErrNoStaticIPRootless is used when a rootless user requests to assign a static IP address + // to a pod or container + ErrNoStaticIPRootless error = errors.New("rootless containers and pods cannot be assigned static IP addresses") + // ErrNoStaticMACRootless is used when a rootless user requests to assign a static MAC address + // to a pod or container + ErrNoStaticMACRootless error = errors.New("rootless containers and pods cannot be assigned static MAC addresses") +) + +// NewSpecGenerator returns a SpecGenerator struct given one of two mandatory inputs +func NewSpecGenerator(arg string, rootfs bool) *SpecGenerator { + csc := ContainerStorageConfig{} + if rootfs { + csc.Rootfs = arg + // check if rootfs should use overlay + lastColonIndex := strings.LastIndex(csc.Rootfs, ":") + if lastColonIndex != -1 && lastColonIndex+1 < len(csc.Rootfs) && csc.Rootfs[lastColonIndex+1:] == "O" { + csc.RootfsOverlay = true + csc.Rootfs = csc.Rootfs[:lastColonIndex] + } + } else { + csc.Image = arg + } + return &SpecGenerator{ + ContainerStorageConfig: csc, + } +} + +// NewSpecGenerator returns a SpecGenerator struct given one of two mandatory inputs +func NewSpecGeneratorWithRootfs(rootfs string) *SpecGenerator { + csc := ContainerStorageConfig{Rootfs: rootfs} + return &SpecGenerator{ContainerStorageConfig: csc} +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/volumes.go b/vendor/github.com/containers/podman/v4/pkg/specgen/volumes.go new file mode 100644 index 00000000000..b26666df3db --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/volumes.go @@ -0,0 +1,177 @@ +package specgen + +import ( + "strings" + + "github.com/containers/common/pkg/parse" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// NamedVolume holds information about a named volume that will be mounted into +// the container. +type NamedVolume struct { + // Name is the name of the named volume to be mounted. May be empty. + // If empty, a new named volume with a pseudorandomly generated name + // will be mounted at the given destination. + Name string + // Destination to mount the named volume within the container. Must be + // an absolute path. Path will be created if it does not exist. + Dest string + // Options are options that the named volume will be mounted with. + Options []string +} + +// OverlayVolume holds information about a overlay volume that will be mounted into +// the container. +type OverlayVolume struct { + // Destination is the absolute path where the mount will be placed in the container. + Destination string `json:"destination"` + // Source specifies the source path of the mount. + Source string `json:"source,omitempty"` + // Options holds overlay volume options. + Options []string `json:"options,omitempty"` +} + +// ImageVolume is a volume based on a container image. The container image is +// first mounted on the host and is then bind-mounted into the container. An +// ImageVolume is always mounted read only. +type ImageVolume struct { + // Source is the source of the image volume. The image can be referred + // to by name and by ID. + Source string + // Destination is the absolute path of the mount in the container. + Destination string + // ReadWrite sets the volume writable. 
+ ReadWrite bool +} + +// GenVolumeMounts parses user input into mounts, volumes and overlay volumes +func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*NamedVolume, map[string]*OverlayVolume, error) { + errDuplicateDest := errors.Errorf("duplicate mount destination") + + mounts := make(map[string]spec.Mount) + volumes := make(map[string]*NamedVolume) + overlayVolumes := make(map[string]*OverlayVolume) + + volumeFormatErr := errors.Errorf("incorrect volume format, should be [host-dir:]ctr-dir[:option]") + + for _, vol := range volumeFlag { + var ( + options []string + src string + dest string + err error + ) + + splitVol := SplitVolumeString(vol) + if len(splitVol) > 3 { + return nil, nil, nil, errors.Wrapf(volumeFormatErr, vol) + } + + src = splitVol[0] + if len(splitVol) == 1 { + // This is an anonymous named volume. Only thing given + // is destination. + // Name/source will be blank, and populated by libpod. + src = "" + dest = splitVol[0] + } else if len(splitVol) > 1 { + dest = splitVol[1] + } + if len(splitVol) > 2 { + if options, err = parse.ValidateVolumeOpts(strings.Split(splitVol[2], ",")); err != nil { + return nil, nil, nil, err + } + } + + // Do not check source dir for anonymous volumes + if len(splitVol) > 1 { + if len(src) == 0 { + return nil, nil, nil, errors.New("host directory cannot be empty") + } + } + + if strings.HasPrefix(src, "/") || strings.HasPrefix(src, ".") || isHostWinPath(src) { + // This is not a named volume + overlayFlag := false + chownFlag := false + for _, o := range options { + if o == "O" { + overlayFlag = true + + joinedOpts := strings.Join(options, "") + if strings.Contains(joinedOpts, "U") { + chownFlag = true + } + + if len(options) > 2 || (len(options) == 2 && !chownFlag) { + return nil, nil, nil, errors.New("can't use 'O' with other options") + } + } + } + if overlayFlag { + // This is a overlay volume + newOverlayVol := new(OverlayVolume) + newOverlayVol.Destination = dest + newOverlayVol.Source = src + newOverlayVol.Options = options + + if _, ok := overlayVolumes[newOverlayVol.Destination]; ok { + return nil, nil, nil, errors.Wrapf(errDuplicateDest, newOverlayVol.Destination) + } + overlayVolumes[newOverlayVol.Destination] = newOverlayVol + } else { + newMount := spec.Mount{ + Destination: dest, + Type: "bind", + Source: src, + Options: options, + } + if _, ok := mounts[newMount.Destination]; ok { + return nil, nil, nil, errors.Wrapf(errDuplicateDest, newMount.Destination) + } + mounts[newMount.Destination] = newMount + } + } else { + // This is a named volume + newNamedVol := new(NamedVolume) + newNamedVol.Name = src + newNamedVol.Dest = dest + newNamedVol.Options = options + + if _, ok := volumes[newNamedVol.Dest]; ok { + return nil, nil, nil, errors.Wrapf(errDuplicateDest, newNamedVol.Dest) + } + volumes[newNamedVol.Dest] = newNamedVol + } + + logrus.Debugf("User mount %s:%s options %v", src, dest, options) + } + + return mounts, volumes, overlayVolumes, nil +} + +// Splits a volume string, accounting for Win drive paths +// when running as a WSL linux guest or Windows client +func SplitVolumeString(vol string) []string { + parts := strings.Split(vol, ":") + if !shouldResolveWinPaths() { + return parts + } + + // Skip extended marker prefix if present + n := 0 + if strings.HasPrefix(vol, `\\?\`) { + n = 4 + } + + if hasWinDriveScheme(vol, n) { + first := parts[0] + ":" + parts[1] + parts = parts[1:] + parts[0] = first + } + + return parts +} diff --git 
a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath.go b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath.go new file mode 100644 index 00000000000..0df4ebdd7b7 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath.go @@ -0,0 +1,60 @@ +package specgen + +import ( + "fmt" + "strings" + "unicode" + + "github.com/pkg/errors" +) + +func isHostWinPath(path string) bool { + return shouldResolveWinPaths() && strings.HasPrefix(path, `\\`) || hasWinDriveScheme(path, 0) || winPathExists(path) +} + +func hasWinDriveScheme(path string, start int) bool { + if len(path) < start+2 || path[start+1] != ':' { + return false + } + + drive := rune(path[start]) + return drive < unicode.MaxASCII && unicode.IsLetter(drive) +} + +// Converts a Windows path to a WSL guest path if local env is a WSL linux guest or this is a Windows client. +func ConvertWinMountPath(path string) (string, error) { + if !shouldResolveWinPaths() { + return path, nil + } + + if strings.HasPrefix(path, "/") { + // Handle /[driveletter]/windows/path form (e.g. c:\Users\bar == /c/Users/bar) + if len(path) > 2 && path[2] == '/' && shouldResolveUnixWinVariant(path) { + drive := unicode.ToLower(rune(path[1])) + if unicode.IsLetter(drive) && drive <= unicode.MaxASCII { + return fmt.Sprintf("/mnt/%c/%s", drive, path[3:]), nil + } + } + + // unix path - pass through + return path, nil + } + + // Convert remote win client relative paths to absolute + path = resolveRelativeOnWindows(path) + + // Strip extended marker prefix if present + path = strings.TrimPrefix(path, `\\?\`) + + // Drive installed via wsl --mount + switch { + case strings.HasPrefix(path, `\\.\`): + path = "/mnt/wsl/" + path[4:] + case len(path) > 1 && path[1] == ':': + path = "/mnt/" + strings.ToLower(path[0:1]) + path[2:] + default: + return path, errors.New("unsupported UNC path") + } + + return strings.ReplaceAll(path, `\`, "/"), nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_linux.go b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_linux.go new file mode 100644 index 00000000000..f42ac76399d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_linux.go @@ -0,0 +1,24 @@ +package specgen + +import ( + "os" + + "github.com/containers/common/pkg/machine" +) + +func shouldResolveWinPaths() bool { + return machine.MachineHostType() == "wsl" +} + +func shouldResolveUnixWinVariant(path string) bool { + _, err := os.Stat(path) + return err != nil +} + +func resolveRelativeOnWindows(path string) string { + return path +} + +func winPathExists(path string) bool { + return false +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_unsupported.go new file mode 100644 index 00000000000..4cd008fddcd --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_unsupported.go @@ -0,0 +1,20 @@ +//go:build !linux && !windows +// +build !linux,!windows + +package specgen + +func shouldResolveWinPaths() bool { + return false +} + +func shouldResolveUnixWinVariant(path string) bool { + return false +} + +func resolveRelativeOnWindows(path string) string { + return path +} + +func winPathExists(path string) bool { + return false +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_windows.go b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_windows.go new file mode 100644 index 00000000000..c6aad314a22 --- /dev/null +++ 
b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_windows.go @@ -0,0 +1,30 @@ +package specgen + +import ( + "github.com/sirupsen/logrus" + "os" + "path/filepath" +) + +func shouldResolveUnixWinVariant(path string) bool { + return true +} + +func shouldResolveWinPaths() bool { + return true +} + +func resolveRelativeOnWindows(path string) string { + ret, err := filepath.Abs(path) + if err != nil { + logrus.Debugf("problem resolving possible relative path %q: %s", path, err.Error()) + return path + } + + return ret +} + +func winPathExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/terminal/console_unix.go b/vendor/github.com/containers/podman/v4/pkg/terminal/console_unix.go new file mode 100644 index 00000000000..53290be2492 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/terminal/console_unix.go @@ -0,0 +1,9 @@ +//go:build !windows +// +build !windows + +package terminal + +// SetConsole for non-windows environments is a no-op. +func SetConsole() error { + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/terminal/console_windows.go b/vendor/github.com/containers/podman/v4/pkg/terminal/console_windows.go new file mode 100644 index 00000000000..1a7da333511 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/terminal/console_windows.go @@ -0,0 +1,38 @@ +//go:build windows +// +build windows + +package terminal + +import ( + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +// SetConsole switches the windows terminal mode to be able to handle colors, etc +func SetConsole() error { + if err := setConsoleMode(windows.Stdout, windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err != nil { + return err + } + if err := setConsoleMode(windows.Stderr, windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err != nil { + return err + } + if err := setConsoleMode(windows.Stdin, windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err != nil { + return err + } + return nil +} + +func setConsoleMode(handle windows.Handle, flags uint32) error { + var mode uint32 + err := windows.GetConsoleMode(handle, &mode) + if err != nil { + return nil // not a terminal + } + if err := windows.SetConsoleMode(handle, mode|flags); err != nil { + // In similar code, it is not considered an error if we cannot set the + // console mode. Following same line of thinking here. + logrus.WithError(err).Debug("Failed to set console mode for cli") + } + + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/terminal/util.go b/vendor/github.com/containers/podman/v4/pkg/terminal/util.go new file mode 100644 index 00000000000..0f0968c30b4 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/terminal/util.go @@ -0,0 +1,134 @@ +package terminal + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/containers/storage/pkg/homedir" + "github.com/sirupsen/logrus" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/knownhosts" + "golang.org/x/term" +) + +var ( + passPhrase []byte + phraseSync sync.Once + password []byte + passwordSync sync.Once +) + +// ReadPassword prompts for a secret and returns value input by user from stdin +// Unlike terminal.ReadPassword(), $(echo $SECRET | podman...) is supported. +// Additionally, all input after `/n` is queued to podman command. 
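+//
+// A minimal sketch of a call site (the prompt text is an assumption):
+//
+//	secret, err := ReadPassword("Password: ")
+//	if err != nil {
+//		return err
+//	}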
+func ReadPassword(prompt string) (pw []byte, err error) { + fd := int(os.Stdin.Fd()) + if term.IsTerminal(fd) { + fmt.Fprint(os.Stderr, prompt) + pw, err = term.ReadPassword(fd) + fmt.Fprintln(os.Stderr) + return + } + + var b [1]byte + for { + n, err := os.Stdin.Read(b[:]) + // terminal.ReadPassword discards any '\r', so we do the same + if n > 0 && b[0] != '\r' { + if b[0] == '\n' { + return pw, nil + } + pw = append(pw, b[0]) + // limit size, so that a wrong input won't fill up the memory + if len(pw) > 1024 { + err = errors.New("password too long, 1024 byte limit") + } + } + if err != nil { + // terminal.ReadPassword accepts EOF-terminated passwords + // if non-empty, so we do the same + if err == io.EOF && len(pw) > 0 { + err = nil + } + return pw, err + } + } +} + +func PublicKey(path string, passphrase []byte) (ssh.Signer, error) { + key, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + signer, err := ssh.ParsePrivateKey(key) + if err != nil { + if _, ok := err.(*ssh.PassphraseMissingError); !ok { + return nil, err + } + if len(passphrase) == 0 { + passphrase = ReadPassphrase() + } + return ssh.ParsePrivateKeyWithPassphrase(key, passphrase) + } + return signer, nil +} + +func ReadPassphrase() []byte { + phraseSync.Do(func() { + secret, err := ReadPassword("Key Passphrase: ") + if err != nil { + secret = []byte{} + } + passPhrase = secret + }) + return passPhrase +} + +func ReadLogin() []byte { + passwordSync.Do(func() { + secret, err := ReadPassword("Login password: ") + if err != nil { + secret = []byte{} + } + password = secret + }) + return password +} + +func HostKey(host string) ssh.PublicKey { + // parse OpenSSH known_hosts file + // ssh or use ssh-keyscan to get initial key + knownHosts := filepath.Join(homedir.Get(), ".ssh", "known_hosts") + fd, err := os.Open(knownHosts) + if err != nil { + logrus.Error(err) + return nil + } + + // support -H parameter for ssh-keyscan + hashhost := knownhosts.HashHostname(host) + + scanner := bufio.NewScanner(fd) + for scanner.Scan() { + _, hosts, key, _, _, err := ssh.ParseKnownHosts(scanner.Bytes()) + if err != nil { + logrus.Errorf("Failed to parse known_hosts: %s", scanner.Text()) + continue + } + + for _, h := range hosts { + if h == host || h == hashhost { + return key + } + } + } + + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/timetype/timestamp.go b/vendor/github.com/containers/podman/v4/pkg/timetype/timestamp.go new file mode 100644 index 00000000000..5e9c6a1591d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/timetype/timestamp.go @@ -0,0 +1,132 @@ +package timetype + +// code adapted from https://github.com/moby/moby/blob/master/api/types/time/timestamp.go + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. 
+// In case of duration input, the returned timestamp is computed
+// as the given reference time minus the amount of the duration.
+func GetTimestamp(value string, reference time.Time) (string, error) {
+	if d, err := time.ParseDuration(value); value != "0" && err == nil {
+		return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+	}
+
+	var format string
+	// if the string has a Z or a + or three dashes, use Parse; otherwise use ParseInLocation
+	parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+	switch {
+	case strings.Contains(value, "."):
+		if parseInLocation {
+			format = rFC3339NanoLocal
+		} else {
+			format = time.RFC3339Nano
+		}
+	case strings.Contains(value, "T"):
+		// we want the number of colons in the T portion of the timestamp
+		tcolons := strings.Count(value, ":")
+		// if parseInLocation is off and we have a +/- zone offset (not Z) then
+		// there will be an extra colon in the input for the tz offset; subtract
+		// that colon from the tcolons count
+		if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+			tcolons--
+		}
+		if parseInLocation {
+			switch tcolons {
+			case 0:
+				format = "2006-01-02T15"
+			case 1:
+				format = "2006-01-02T15:04"
+			default:
+				format = rFC3339Local
+			}
+		} else {
+			switch tcolons {
+			case 0:
+				format = "2006-01-02T15Z07:00"
+			case 1:
+				format = "2006-01-02T15:04Z07:00"
+			default:
+				format = time.RFC3339
+			}
+		}
+	case parseInLocation:
+		format = dateLocal
+	default:
+		format = dateWithZone
+	}
+
+	var t time.Time
+	var err error
+
+	if parseInLocation {
+		t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+	} else {
+		t, err = time.Parse(format, value)
+	}
+
+	if err != nil {
+		// if there is a `-` then it's an RFC3339 like timestamp
+		if strings.Contains(value, "-") {
+			return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
+		}
+		if _, _, err := parseTimestamp(value); err != nil {
+			return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
+		}
+		return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+	}
+
+	return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
+// the format "%d.%09d" (as produced from time.Unix() and int64(time.Nanosecond())).
+// If the incoming nanosecond portion is longer or shorter than 9 digits, it is
+// converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable. For example:
+//
+//	seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
+//	if err == nil {
+//		since := time.Unix(seconds, nanoseconds)
+//	}
+//
+// If value is empty, the given default seconds (def) and zero nanoseconds
+// are returned.
+func ParseTimestamps(value string, def int64) (int64, int64, error) {
+	if value == "" {
+		return def, 0, nil
+	}
+	return parseTimestamp(value)
+}
+
+func parseTimestamp(value string) (int64, int64, error) {
+	sa := strings.SplitN(value, ".", 2)
+	s, err := strconv.ParseInt(sa[0], 10, 64)
+	if err != nil {
+		return s, 0, err
+	}
+	if len(sa) != 2 {
+		return s, 0, nil
+	}
+	n, err := strconv.ParseInt(sa[1], 10, 64)
+	if err != nil {
+		return s, n, err
+	}
+	// should already be in nanoseconds but just in case convert n to nanoseconds
+	n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
+	return s, n, nil
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/trust/config.go b/vendor/github.com/containers/podman/v4/pkg/trust/config.go
new file mode 100644
index 00000000000..6186d4cbd19
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/trust/config.go
@@ -0,0 +1,12 @@
+package trust
+
+// Policy describes a basic trust policy configuration
+type Policy struct {
+	Transport      string   `json:"transport"`
+	Name           string   `json:"name,omitempty"`
+	RepoName       string   `json:"repo_name,omitempty"`
+	Keys           []string `json:"keys,omitempty"`
+	SignatureStore string   `json:"sigstore,omitempty"`
+	Type           string   `json:"type"`
+	GPGId          string   `json:"gpg_id,omitempty"`
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/trust/trust.go b/vendor/github.com/containers/podman/v4/pkg/trust/trust.go
new file mode 100644
index 00000000000..1d0cc61babf
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/trust/trust.go
@@ -0,0 +1,243 @@
+package trust
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/image/v5/types"
+	"github.com/docker/docker/pkg/homedir"
+	"github.com/ghodss/yaml"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// PolicyContent struct for the policy.json file
+type PolicyContent struct {
+	Default    []RepoContent     `json:"default"`
+	Transports TransportsContent `json:"transports,omitempty"`
+}
+
+// RepoContent struct used under each repo
+type RepoContent struct {
+	Type           string          `json:"type"`
+	KeyType        string          `json:"keyType,omitempty"`
+	KeyPath        string          `json:"keyPath,omitempty"`
+	KeyData        string          `json:"keyData,omitempty"`
+	SignedIdentity json.RawMessage `json:"signedIdentity,omitempty"`
+}
+
+// RepoMap maps a repo name to the policy content for that repo
+type RepoMap map[string][]RepoContent
+
+// TransportsContent struct for content under "transports"
+type TransportsContent map[string]RepoMap
+
+// RegistryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
+// NOTE: Keep this in sync with docs/registries.d.md!
+type RegistryConfiguration struct {
+	DefaultDocker *RegistryNamespace `json:"default-docker"`
+	// The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
+	Docker map[string]RegistryNamespace `json:"docker"`
+}
+
+// RegistryNamespace defines lookaside locations for a single namespace.
+type RegistryNamespace struct {
+	SigStore        string `json:"sigstore"`         // For reading, and if SigStoreStaging is not present, for writing.
+	SigStoreStaging string `json:"sigstore-staging"` // For writing only.
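+	//
+	// For illustration only, a registries.d file that these structs model
+	// might look like the following (registry name and paths are assumed):
+	//
+	//	default-docker:
+	//	  sigstore: file:///var/lib/containers/sigstore
+	//	docker:
+	//	  registry.example.com:
+	//	    sigstore-staging: file:///tmp/sigstore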
+} + +// ShowOutput keep the fields for image trust show command +type ShowOutput struct { + Repo string + Trusttype string + GPGid string + Sigstore string +} + +// systemRegistriesDirPath is the path to registries.d. +const systemRegistriesDirPath = "/etc/containers/registries.d" + +// userRegistriesDir is the path to the per user registries.d. +var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d") + +// DefaultPolicyPath returns a path to the default policy of the system. +func DefaultPolicyPath(sys *types.SystemContext) string { + systemDefaultPolicyPath := "/etc/containers/policy.json" + if sys != nil { + if sys.SignaturePolicyPath != "" { + return sys.SignaturePolicyPath + } + if sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath) + } + } + return systemDefaultPolicyPath +} + +// RegistriesDirPath returns a path to registries.d +func RegistriesDirPath(sys *types.SystemContext) string { + if sys != nil && sys.RegistriesDirPath != "" { + return sys.RegistriesDirPath + } + userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir) + if _, err := os.Stat(userRegistriesDirPath); err == nil { + return userRegistriesDirPath + } + if sys != nil && sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath) + } + + return systemRegistriesDirPath +} + +// LoadAndMergeConfig loads configuration files in dirPath +func LoadAndMergeConfig(dirPath string) (*RegistryConfiguration, error) { + mergedConfig := RegistryConfiguration{Docker: map[string]RegistryNamespace{}} + dockerDefaultMergedFrom := "" + nsMergedFrom := map[string]string{} + + dir, err := os.Open(dirPath) + if err != nil { + if os.IsNotExist(err) { + return &mergedConfig, nil + } + return nil, err + } + configNames, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + for _, configName := range configNames { + if !strings.HasSuffix(configName, ".yaml") { + continue + } + configPath := filepath.Join(dirPath, configName) + configBytes, err := ioutil.ReadFile(configPath) + if err != nil { + return nil, err + } + var config RegistryConfiguration + err = yaml.Unmarshal(configBytes, &config) + if err != nil { + return nil, errors.Wrapf(err, "error parsing %s", configPath) + } + if config.DefaultDocker != nil { + if mergedConfig.DefaultDocker != nil { + return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, + dockerDefaultMergedFrom, configPath) + } + mergedConfig.DefaultDocker = config.DefaultDocker + dockerDefaultMergedFrom = configPath + } + for nsName, nsConfig := range config.Docker { // includes config.Docker == nil + if _, ok := mergedConfig.Docker[nsName]; ok { + return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, + nsName, nsMergedFrom[nsName], configPath) + } + mergedConfig.Docker[nsName] = nsConfig + nsMergedFrom[nsName] = configPath + } + } + return &mergedConfig, nil +} + +// HaveMatchRegistry checks if trust settings for the registry have been configured in yaml file +func HaveMatchRegistry(key string, registryConfigs *RegistryConfiguration) *RegistryNamespace { + searchKey := key + if !strings.Contains(searchKey, "/") { + val, exists := registryConfigs.Docker[searchKey] + if exists { + return &val + } + } + for range strings.Split(key, "/") { + val, exists := registryConfigs.Docker[searchKey] + if 
exists {
+			return &val
+		}
+		if strings.Contains(searchKey, "/") {
+			searchKey = searchKey[:strings.LastIndex(searchKey, "/")]
+		}
+	}
+	return registryConfigs.DefaultDocker
+}
+
+// CreateTmpFile creates a temp file under dir and writes the content into it
+func CreateTmpFile(dir, pattern string, content []byte) (string, error) {
+	tmpfile, err := ioutil.TempFile(dir, pattern)
+	if err != nil {
+		return "", err
+	}
+	defer tmpfile.Close()
+
+	if _, err := tmpfile.Write(content); err != nil {
+		return "", err
+	}
+	return tmpfile.Name(), nil
+}
+
+// GetGPGIdFromKeyPath returns the user keyring from a key path
+func GetGPGIdFromKeyPath(path string) []string {
+	cmd := exec.Command("gpg2", "--with-colons", path)
+	results, err := cmd.Output()
+	if err != nil {
+		logrus.Errorf("Getting key identity: %s", err)
+		return nil
+	}
+	return parseUids(results)
+}
+
+// GetGPGIdFromKeyData returns the user keyring from key data
+func GetGPGIdFromKeyData(key string) []string {
+	decodeKey, err := base64.StdEncoding.DecodeString(key)
+	if err != nil {
+		logrus.Errorf("%s, error decoding key data", err)
+		return nil
+	}
+	tmpfileName, err := CreateTmpFile("", "", decodeKey)
+	if err != nil {
+		logrus.Errorf("Creating key data temp file %s", err)
+	}
+	defer os.Remove(tmpfileName)
+	return GetGPGIdFromKeyPath(tmpfileName)
+}
+
+func parseUids(colonDelimitKeys []byte) []string {
+	var parseduids []string
+	scanner := bufio.NewScanner(bytes.NewReader(colonDelimitKeys))
+	for scanner.Scan() {
+		line := scanner.Text()
+		if strings.HasPrefix(line, "uid:") || strings.HasPrefix(line, "pub:") {
+			uid := strings.Split(line, ":")[9]
+			if uid == "" {
+				continue
+			}
+			parseduid := uid
+			if strings.Contains(uid, "<") && strings.Contains(uid, ">") {
+				parseduid = strings.SplitN(strings.SplitAfterN(uid, "<", 2)[1], ">", 2)[0]
+			}
+			parseduids = append(parseduids, parseduid)
+		}
+	}
+	return parseduids
+}
+
+// GetPolicy parses policy.json into a PolicyContent struct
+func GetPolicy(policyPath string) (PolicyContent, error) {
+	var policyContentStruct PolicyContent
+	policyContent, err := ioutil.ReadFile(policyPath)
+	if err != nil {
+		return policyContentStruct, errors.Wrap(err, "unable to read policy file")
+	}
+	if err := json.Unmarshal(policyContent, &policyContentStruct); err != nil {
+		return policyContentStruct, errors.Wrapf(err, "could not parse trust policies from %s", policyPath)
+	}
+	return policyContentStruct, nil
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/util/filters.go b/vendor/github.com/containers/podman/v4/pkg/util/filters.go
new file mode 100644
index 00000000000..05ba4f82cdb
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/util/filters.go
@@ -0,0 +1,128 @@
+package util
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/containers/podman/v4/pkg/timetype"
+	"github.com/pkg/errors"
+)
+
+// ComputeUntilTimestamp extracts the until timestamp from filters
+func ComputeUntilTimestamp(filterValues []string) (time.Time, error) {
+	invalid := time.Time{}
+	if len(filterValues) != 1 {
+		return invalid, errors.Errorf("specify exactly one timestamp for until")
+	}
+	ts, err := timetype.GetTimestamp(filterValues[0], time.Now())
+	if err != nil {
+		return invalid, err
+	}
+	seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0)
+	if err != nil {
+		return invalid, err
+	}
+	return time.Unix(seconds, nanoseconds), nil
+}
+
+// FiltersFromRequest extracts the "filters" parameter from the specified
+// http.Request.
The parameter can either be a `map[string][]string` as done +// in new versions of Docker and libpod, or a `map[string]map[string]bool` as +// done in older versions of Docker. We have to do a bit of Yoga to support +// both - just as Docker does as well. +// +// Please refer to https://github.com/containers/podman/issues/6899 for some +// background. +func FiltersFromRequest(r *http.Request) ([]string, error) { + var ( + compatFilters map[string]map[string]bool + filters map[string][]string + libpodFilters []string + raw []byte + ) + + if _, found := r.URL.Query()["filters"]; found { + raw = []byte(r.Form.Get("filters")) + } else if _, found := r.URL.Query()["Filters"]; found { + raw = []byte(r.Form.Get("Filters")) + } else { + return []string{}, nil + } + + // Backwards compat with older versions of Docker. + if err := json.Unmarshal(raw, &compatFilters); err == nil { + for filterKey, filterMap := range compatFilters { + for filterValue, toAdd := range filterMap { + if toAdd { + libpodFilters = append(libpodFilters, fmt.Sprintf("%s=%s", filterKey, filterValue)) + } + } + } + return libpodFilters, nil + } + + if err := json.Unmarshal(raw, &filters); err != nil { + return nil, err + } + + for filterKey, filterSlice := range filters { + for _, filterValue := range filterSlice { + libpodFilters = append(libpodFilters, fmt.Sprintf("%s=%s", filterKey, filterValue)) + } + } + + return libpodFilters, nil +} + +// PrepareFilters prepares a *map[string][]string of filters to be later searched +// in lipod and compat API to get desired filters +func PrepareFilters(r *http.Request) (*map[string][]string, error) { + filtersList, err := FiltersFromRequest(r) + if err != nil { + return nil, err + } + filterMap := map[string][]string{} + for _, filter := range filtersList { + split := strings.SplitN(filter, "=", 2) + if len(split) > 1 { + filterMap[split[0]] = append(filterMap[split[0]], split[1]) + } + } + return &filterMap, nil +} + +func matchPattern(pattern string, value string) bool { + if strings.Contains(pattern, "*") { + filter := fmt.Sprintf("*%s*", pattern) + filter = strings.ReplaceAll(filter, string(filepath.Separator), "|") + newName := strings.ReplaceAll(value, string(filepath.Separator), "|") + match, _ := filepath.Match(filter, newName) + return match + } + return false +} + +// MatchLabelFilters matches labels and returns true if they are valid +func MatchLabelFilters(filterValues []string, labels map[string]string) bool { +outer: + for _, filterValue := range filterValues { + filterArray := strings.SplitN(filterValue, "=", 2) + filterKey := filterArray[0] + if len(filterArray) > 1 { + filterValue = filterArray[1] + } else { + filterValue = "" + } + for labelKey, labelValue := range labels { + if ((labelKey == filterKey) || matchPattern(filterKey, labelKey)) && (filterValue == "" || labelValue == filterValue) { + continue outer + } + } + return false + } + return true +} diff --git a/vendor/github.com/containers/podman/v4/pkg/util/kube.go b/vendor/github.com/containers/podman/v4/pkg/util/kube.go new file mode 100644 index 00000000000..1255cdfc59b --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/util/kube.go @@ -0,0 +1,16 @@ +package util + +const ( + // Kube annotation for podman volume driver. + VolumeDriverAnnotation = "volume.podman.io/driver" + // Kube annotation for podman volume type. + VolumeTypeAnnotation = "volume.podman.io/type" + // Kube annotation for podman volume device. 
+ VolumeDeviceAnnotation = "volume.podman.io/device" + // Kube annotation for podman volume UID. + VolumeUIDAnnotation = "volume.podman.io/uid" + // Kube annotation for podman volume GID. + VolumeGIDAnnotation = "volume.podman.io/gid" + // Kube annotation for podman volume mount options. + VolumeMountOptsAnnotation = "volume.podman.io/mount-options" +) diff --git a/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go new file mode 100644 index 00000000000..e37394619e3 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go @@ -0,0 +1,192 @@ +package util + +import ( + "strings" + + "github.com/pkg/errors" +) + +var ( + // ErrBadMntOption indicates that an invalid mount option was passed. + ErrBadMntOption = errors.Errorf("invalid mount option") + // ErrDupeMntOption indicates that a duplicate mount option was passed. + ErrDupeMntOption = errors.Errorf("duplicate mount option passed") +) + +type defaultMountOptions struct { + noexec bool + nosuid bool + nodev bool +} + +// ProcessOptions parses the options for a bind or tmpfs mount and ensures that +// they are sensible and follow convention. The isTmpfs variable controls +// whether extra, tmpfs-specific options will be allowed. +// The sourcePath variable, if not empty, contains a bind mount source. +func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string, error) { + var ( + foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ, foundU, foundOverlay, foundIdmap bool + ) + + newOptions := make([]string, 0, len(options)) + for _, opt := range options { + // Some options have parameters - size, mode + splitOpt := strings.SplitN(opt, "=", 2) + + // add advanced options such as upperdir=/path and workdir=/path, when overlay is specified + if foundOverlay { + if strings.Contains(opt, "upperdir") { + newOptions = append(newOptions, opt) + continue + } + if strings.Contains(opt, "workdir") { + newOptions = append(newOptions, opt) + continue + } + } + + if strings.HasPrefix(splitOpt[0], "idmap") { + if foundIdmap { + return nil, errors.Wrapf(ErrDupeMntOption, "the 'idmap' option can only be set once") + } + foundIdmap = true + newOptions = append(newOptions, opt) + continue + } + + switch splitOpt[0] { + case "O": + foundOverlay = true + case "volume-opt": + // Volume-opt should be relayed and processed by driver. 
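+			// For example (an assumed value, passed through verbatim):
+			//
+			//	volume-opt=o=uid=1000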
+ newOptions = append(newOptions, opt) + case "exec", "noexec": + if foundExec { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'noexec' and 'exec' can be used") + } + foundExec = true + case "suid", "nosuid": + if foundSuid { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'nosuid' and 'suid' can be used") + } + foundSuid = true + case "nodev", "dev": + if foundDev { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'nodev' and 'dev' can be used") + } + foundDev = true + case "rw", "ro": + if foundWrite { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'rw' and 'ro' can be used") + } + foundWrite = true + case "private", "rprivate", "slave", "rslave", "shared", "rshared", "unbindable", "runbindable": + if foundProp { + return nil, errors.Wrapf(ErrDupeMntOption, "only one root propagation mode can be used") + } + foundProp = true + case "size": + if !isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'size' option is only allowed with tmpfs mounts") + } + if foundSize { + return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs size can be specified") + } + foundSize = true + case "mode": + if !isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'mode' option is only allowed with tmpfs mounts") + } + if foundMode { + return nil, errors.Wrapf(ErrDupeMntOption, "only one tmpfs mode can be specified") + } + foundMode = true + case "tmpcopyup": + if !isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'tmpcopyup' option is only allowed with tmpfs mounts") + } + if foundCopyUp { + return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' or 'notmpcopyup' option can only be set once") + } + foundCopyUp = true + case "consistency": + // Often used on MACs and mistakenly on Linux platforms. + // Since Docker ignores this option so shall we. 
+ continue + case "notmpcopyup": + if !isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'notmpcopyup' option is only allowed with tmpfs mounts") + } + if foundCopyUp { + return nil, errors.Wrapf(ErrDupeMntOption, "the 'tmpcopyup' or 'notmpcopyup' option can only be set once") + } + foundCopyUp = true + // do not propagate notmpcopyup to the OCI runtime + continue + case "bind", "rbind": + if isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'bind' and 'rbind' options are not allowed with tmpfs mounts") + } + if foundBind { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'rbind' and 'bind' can be used") + } + foundBind = true + case "z", "Z": + if isTmpfs { + return nil, errors.Wrapf(ErrBadMntOption, "the 'z' and 'Z' options are not allowed with tmpfs mounts") + } + if foundZ { + return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'z' and 'Z' can be used") + } + foundZ = true + case "U": + if foundU { + return nil, errors.Wrapf(ErrDupeMntOption, "the 'U' option can only be set once") + } + foundU = true + default: + return nil, errors.Wrapf(ErrBadMntOption, "unknown mount option %q", opt) + } + newOptions = append(newOptions, opt) + } + + if !foundWrite { + newOptions = append(newOptions, "rw") + } + if !foundProp { + newOptions = append(newOptions, "rprivate") + } + defaults, err := getDefaultMountOptions(sourcePath) + if err != nil { + return nil, err + } + if !foundExec && defaults.noexec { + newOptions = append(newOptions, "noexec") + } + if !foundSuid && defaults.nosuid { + newOptions = append(newOptions, "nosuid") + } + if !foundDev && defaults.nodev { + newOptions = append(newOptions, "nodev") + } + if isTmpfs && !foundCopyUp { + newOptions = append(newOptions, "tmpcopyup") + } + if !isTmpfs && !foundBind { + newOptions = append(newOptions, "rbind") + } + + return newOptions, nil +} + +func ParseDriverOpts(option string) (string, string, error) { + token := strings.SplitN(option, "=", 2) + if len(token) != 2 { + return "", "", errors.Wrapf(ErrBadMntOption, "cannot parse driver opts") + } + opt := strings.SplitN(token[1], "=", 2) + if len(opt) != 2 { + return "", "", errors.Wrapf(ErrBadMntOption, "cannot parse driver opts") + } + return opt[0], opt[1], nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/util/mountOpts_linux.go b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts_linux.go new file mode 100644 index 00000000000..bc7c675f325 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts_linux.go @@ -0,0 +1,23 @@ +package util + +import ( + "os" + + "golang.org/x/sys/unix" +) + +func getDefaultMountOptions(path string) (defaultMountOptions, error) { + opts := defaultMountOptions{false, true, true} + if path == "" { + return opts, nil + } + var statfs unix.Statfs_t + if e := unix.Statfs(path, &statfs); e != nil { + return opts, &os.PathError{Op: "statfs", Path: path, Err: e} + } + opts.nodev = (statfs.Flags&unix.MS_NODEV == unix.MS_NODEV) + opts.noexec = (statfs.Flags&unix.MS_NOEXEC == unix.MS_NOEXEC) + opts.nosuid = (statfs.Flags&unix.MS_NOSUID == unix.MS_NOSUID) + + return opts, nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/util/mountOpts_other.go b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts_other.go new file mode 100644 index 00000000000..64b4dd1d949 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts_other.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package util + +func getDefaultMountOptions(path string) (opts defaultMountOptions, 
err error) {
+	return
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils.go b/vendor/github.com/containers/podman/v4/pkg/util/utils.go
new file mode 100644
index 00000000000..1b766333097
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/util/utils.go
@@ -0,0 +1,753 @@
+package util
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/fs"
+	"math"
+	"os"
+	"os/user"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/BurntSushi/toml"
+	"github.com/containers/common/pkg/config"
+	"github.com/containers/common/pkg/util"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/podman/v4/pkg/errorhandling"
+	"github.com/containers/podman/v4/pkg/namespaces"
+	"github.com/containers/podman/v4/pkg/rootless"
+	"github.com/containers/podman/v4/pkg/signal"
+	"github.com/containers/storage/pkg/idtools"
+	stypes "github.com/containers/storage/types"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/term"
+)
+
+var containerConfig *config.Config
+
+func init() {
+	var err error
+	containerConfig, err = config.Default()
+	if err != nil {
+		logrus.Error(err)
+		os.Exit(1)
+	}
+}
+
+// parseCreds is a helper function to determine the username/password passed
+// in the creds string. It could be either or both.
+func parseCreds(creds string) (string, string) {
+	if creds == "" {
+		return "", ""
+	}
+	up := strings.SplitN(creds, ":", 2)
+	if len(up) == 1 {
+		return up[0], ""
+	}
+	return up[0], up[1]
+}
+
+// ParseRegistryCreds takes a credentials string in the form USERNAME:PASSWORD
+// and returns a DockerAuthConfig
+func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) {
+	username, password := parseCreds(creds)
+	if username == "" {
+		fmt.Print("Username: ")
+		fmt.Scanln(&username)
+	}
+	if password == "" {
+		fmt.Print("Password: ")
+		termPassword, err := term.ReadPassword(0)
+		if err != nil {
+			return nil, errors.Wrapf(err, "could not read password from terminal")
+		}
+		password = string(termPassword)
+	}
+
+	return &types.DockerAuthConfig{
+		Username: username,
+		Password: password,
+	}, nil
+}
+
+// StringInSlice is deprecated, use containers/common/pkg/util/StringInSlice
+func StringInSlice(s string, sl []string) bool {
+	return util.StringInSlice(s, sl)
+}
+
+// StringMatchRegexSlice determines if a given string matches one of the given regexes, returns bool
+func StringMatchRegexSlice(s string, re []string) bool {
+	for _, r := range re {
+		m, err := regexp.MatchString(r, s)
+		if err == nil && m {
+			return true
+		}
+	}
+	return false
+}
+
+// ImageConfig is a wrapper around the OCIv1 Image Configuration struct exported
+// by containers/image, but containing additional fields that are not supported
+// by OCIv1 (but are by Docker v2) - notably OnBuild.
+type ImageConfig struct {
+	v1.ImageConfig
+	OnBuild []string
+}
+
+// GetImageConfig produces a v1.ImageConfig from the --change flag that is
+// accepted by several Podman commands. It accepts a (limited) subset of
+// Dockerfile instructions.
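+//
+// A hedged sketch of a call (the instruction values are illustrative):
+//
+//	cfg, err := GetImageConfig([]string{
+//		"ENV PATH=/usr/local/bin",
+//		"EXPOSE 8080/tcp",
+//		"LABEL maintainer=dev@example.com",
+//	})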
+func GetImageConfig(changes []string) (ImageConfig, error) { + // Valid changes: + // USER + // EXPOSE + // ENV + // ENTRYPOINT + // CMD + // VOLUME + // WORKDIR + // LABEL + // STOPSIGNAL + // ONBUILD + + config := ImageConfig{} + + for _, change := range changes { + // First, let's assume proper Dockerfile format - space + // separator between instruction and value + split := strings.SplitN(change, " ", 2) + + if len(split) != 2 { + split = strings.SplitN(change, "=", 2) + if len(split) != 2 { + return ImageConfig{}, errors.Errorf("invalid change %q - must be formatted as KEY VALUE", change) + } + } + + outerKey := strings.ToUpper(strings.TrimSpace(split[0])) + value := strings.TrimSpace(split[1]) + switch outerKey { + case "USER": + // Assume literal contents are the user. + if value == "" { + return ImageConfig{}, errors.Errorf("invalid change %q - must provide a value to USER", change) + } + config.User = value + case "EXPOSE": + // EXPOSE is either [portnum] or + // [portnum]/[proto] + // Protocol must be "tcp" or "udp" + splitPort := strings.Split(value, "/") + if len(splitPort) > 2 { + return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change) + } + portNum, err := strconv.Atoi(splitPort[0]) + if err != nil { + return ImageConfig{}, errors.Wrapf(err, "invalid change %q - EXPOSE port must be an integer", change) + } + if portNum > 65535 || portNum <= 0 { + return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE port must be a valid port number", change) + } + proto := "tcp" + if len(splitPort) > 1 { + testProto := strings.ToLower(splitPort[1]) + switch testProto { + case "tcp", "udp": + proto = testProto + default: + return ImageConfig{}, errors.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change) + } + } + if config.ExposedPorts == nil { + config.ExposedPorts = make(map[string]struct{}) + } + config.ExposedPorts[fmt.Sprintf("%d/%s", portNum, proto)] = struct{}{} + case "ENV": + // Format is either: + // ENV key=value + // ENV key=value key=value ... + // ENV key value + // Both keys and values can be surrounded by quotes to group them. + // For now: we only support key=value + // We will attempt to strip quotation marks if present. + + var ( + key, val string + ) + + splitEnv := strings.SplitN(value, "=", 2) + key = splitEnv[0] + // We do need a key + if key == "" { + return ImageConfig{}, errors.Errorf("invalid change %q - ENV must have at least one argument", change) + } + // Perfectly valid to not have a value + if len(splitEnv) == 2 { + val = splitEnv[1] + } + + if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) { + key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`) + } + if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) { + val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`) + } + config.Env = append(config.Env, fmt.Sprintf("%s=%s", key, val)) + case "ENTRYPOINT": + // Two valid forms. + // First, JSON array. + // Second, not a JSON array - we interpret this as an + // argument to `sh -c`, unless empty, in which case we + // just use a blank entrypoint. + testUnmarshal := []string{} + if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil { + // It ain't valid JSON, so assume it's an + // argument to sh -c if not empty. 
+ if value != "" { + config.Entrypoint = []string{"/bin/sh", "-c", value} + } else { + config.Entrypoint = []string{} + } + } else { + // Valid JSON + config.Entrypoint = testUnmarshal + } + case "CMD": + // Same valid forms as entrypoint. + // However, where ENTRYPOINT assumes that 'ENTRYPOINT ' + // means no entrypoint, CMD assumes it is 'sh -c' with + // no third argument. + testUnmarshal := []string{} + if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil { + // It ain't valid JSON, so assume it's an + // argument to sh -c. + // Only include volume if it's not "" + config.Cmd = []string{"/bin/sh", "-c"} + if value != "" { + config.Cmd = append(config.Cmd, value) + } + } else { + // Valid JSON + config.Cmd = testUnmarshal + } + case "VOLUME": + // Either a JSON array or a set of space-separated + // paths. + // Acts rather similar to ENTRYPOINT and CMD, but always + // appends rather than replacing, and no sh -c prepend. + testUnmarshal := []string{} + if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil { + // Not valid JSON, so split on spaces + testUnmarshal = strings.Split(value, " ") + } + if len(testUnmarshal) == 0 { + return ImageConfig{}, errors.Errorf("invalid change %q - must provide at least one argument to VOLUME", change) + } + for _, vol := range testUnmarshal { + if vol == "" { + return ImageConfig{}, errors.Errorf("invalid change %q - VOLUME paths must not be empty", change) + } + if config.Volumes == nil { + config.Volumes = make(map[string]struct{}) + } + config.Volumes[vol] = struct{}{} + } + case "WORKDIR": + // This can be passed multiple times. + // Each successive invocation is treated as relative to + // the previous one - so WORKDIR /A, WORKDIR b, + // WORKDIR c results in /A/b/c + // Just need to check it's not empty... + if value == "" { + return ImageConfig{}, errors.Errorf("invalid change %q - must provide a non-empty WORKDIR", change) + } + config.WorkingDir = filepath.Join(config.WorkingDir, value) + case "LABEL": + // Same general idea as ENV, but we no longer allow " " + // as a separator. + // We didn't do that for ENV either, so nice and easy. + // Potentially problematic: LABEL might theoretically + // allow an = in the key? If people really do this, we + // may need to investigate more advanced parsing. + var ( + key, val string + ) + + splitLabel := strings.SplitN(value, "=", 2) + // Unlike ENV, LABEL must have a value + if len(splitLabel) != 2 { + return ImageConfig{}, errors.Errorf("invalid change %q - LABEL must be formatted key=value", change) + } + key = splitLabel[0] + val = splitLabel[1] + + if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) { + key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`) + } + if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) { + val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`) + } + // Check key after we strip quotations + if key == "" { + return ImageConfig{}, errors.Errorf("invalid change %q - LABEL must have a non-empty key", change) + } + if config.Labels == nil { + config.Labels = make(map[string]string) + } + config.Labels[key] = val + case "STOPSIGNAL": + // Check the provided signal for validity. + killSignal, err := ParseSignal(value) + if err != nil { + return ImageConfig{}, errors.Wrapf(err, "invalid change %q - KILLSIGNAL must be given a valid signal", change) + } + config.StopSignal = fmt.Sprintf("%d", killSignal) + case "ONBUILD": + // Onbuild always appends. 
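+			// e.g. an assumed change of "ONBUILD RUN /usr/local/bin/prep.sh"
+			// appends "RUN /usr/local/bin/prep.sh" to OnBuild.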
+ if value == "" { + return ImageConfig{}, errors.Errorf("invalid change %q - ONBUILD must be given an argument", change) + } + config.OnBuild = append(config.OnBuild, value) + default: + return ImageConfig{}, errors.Errorf("invalid change %q - invalid instruction %s", change, outerKey) + } + } + + return config, nil +} + +// ParseSignal parses and validates a signal name or number. +func ParseSignal(rawSignal string) (syscall.Signal, error) { + // Strip off leading dash, to allow -1 or -HUP + basename := strings.TrimPrefix(rawSignal, "-") + + sig, err := signal.ParseSignal(basename) + if err != nil { + return -1, err + } + // 64 is SIGRTMAX; wish we could get this from a standard Go library + if sig < 1 || sig > 64 { + return -1, errors.Errorf("valid signals are 1 through 64") + } + return sig, nil +} + +// GetKeepIDMapping returns the mappings and the user to use when keep-id is used +func GetKeepIDMapping() (*stypes.IDMappingOptions, int, int, error) { + if !rootless.IsRootless() { + return nil, -1, -1, errors.New("keep-id is only supported in rootless mode") + } + options := stypes.IDMappingOptions{ + HostUIDMapping: false, + HostGIDMapping: false, + } + min := func(a, b int) int { + if a < b { + return a + } + return b + } + + uid := rootless.GetRootlessUID() + gid := rootless.GetRootlessGID() + + uids, gids, err := rootless.GetConfiguredMappings() + if err != nil { + return nil, -1, -1, errors.Wrapf(err, "cannot read mappings") + } + if len(uids) == 0 || len(gids) == 0 { + return nil, -1, -1, errors.Wrapf(err, "keep-id requires additional UIDs or GIDs defined in /etc/subuid and /etc/subgid to function correctly") + } + maxUID, maxGID := 0, 0 + for _, u := range uids { + maxUID += u.Size + } + for _, g := range gids { + maxGID += g.Size + } + + options.UIDMap, options.GIDMap = nil, nil + + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)}) + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: 0, Size: 1}) + if maxUID > uid { + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid + 1, HostID: uid + 1, Size: maxUID - uid}) + } + + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)}) + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: 0, Size: 1}) + if maxGID > gid { + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid + 1, HostID: gid + 1, Size: maxGID - gid}) + } + + return &options, uid, gid, nil +} + +// GetNoMapMapping returns the mappings and the user to use when nomap is used +func GetNoMapMapping() (*stypes.IDMappingOptions, int, int, error) { + if !rootless.IsRootless() { + return nil, -1, -1, errors.New("nomap is only supported in rootless mode") + } + options := stypes.IDMappingOptions{ + HostUIDMapping: false, + HostGIDMapping: false, + } + uids, gids, err := rootless.GetConfiguredMappings() + if err != nil { + return nil, -1, -1, errors.Wrapf(err, "cannot read mappings") + } + if len(uids) == 0 || len(gids) == 0 { + return nil, -1, -1, errors.Wrapf(err, "nomap requires additional UIDs or GIDs defined in /etc/subuid and /etc/subgid to function correctly") + } + options.UIDMap, options.GIDMap = nil, nil + uid, gid := 0, 0 + for _, u := range uids { + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: uid + 1, Size: u.Size}) + uid += u.Size + } + for _, g := range gids { + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, 
HostID: gid + 1, Size: g.Size}) + gid += g.Size + } + return &options, 0, 0, nil +} + +// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping +func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []string, subUIDMap, subGIDMap string) (*stypes.IDMappingOptions, error) { + options := stypes.IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + } + + if mode.IsAuto() { + var err error + options.HostUIDMapping = false + options.HostGIDMapping = false + options.AutoUserNs = true + opts, err := mode.GetAutoOptions() + if err != nil { + return nil, err + } + options.AutoUserNsOpts = *opts + return &options, nil + } + if mode.IsKeepID() || mode.IsNoMap() { + options.HostUIDMapping = false + options.HostGIDMapping = false + return &options, nil + } + + if subGIDMap == "" && subUIDMap != "" { + subGIDMap = subUIDMap + } + if subUIDMap == "" && subGIDMap != "" { + subUIDMap = subGIDMap + } + if len(gidMapSlice) == 0 && len(uidMapSlice) != 0 { + gidMapSlice = uidMapSlice + } + if len(uidMapSlice) == 0 && len(gidMapSlice) != 0 { + uidMapSlice = gidMapSlice + } + + if subUIDMap != "" && subGIDMap != "" { + mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap) + if err != nil { + return nil, err + } + options.UIDMap = mappings.UIDs() + options.GIDMap = mappings.GIDs() + } + parsedUIDMap, err := idtools.ParseIDMap(uidMapSlice, "UID") + if err != nil { + return nil, err + } + parsedGIDMap, err := idtools.ParseIDMap(gidMapSlice, "GID") + if err != nil { + return nil, err + } + options.UIDMap = append(options.UIDMap, parsedUIDMap...) + options.GIDMap = append(options.GIDMap, parsedGIDMap...) + if len(options.UIDMap) > 0 { + options.HostUIDMapping = false + } + if len(options.GIDMap) > 0 { + options.HostGIDMapping = false + } + return &options, nil +} + +var ( + rootlessConfigHomeDirOnce sync.Once + rootlessConfigHomeDir string + rootlessRuntimeDirOnce sync.Once + rootlessRuntimeDir string +) + +type tomlOptionsConfig struct { + MountProgram string `toml:"mount_program"` +} + +type tomlConfig struct { + Storage struct { + Driver string `toml:"driver"` + RunRoot string `toml:"runroot"` + GraphRoot string `toml:"graphroot"` + Options struct{ tomlOptionsConfig } `toml:"options"` + } `toml:"storage"` +} + +func getTomlStorage(storeOptions *stypes.StoreOptions) *tomlConfig { + config := new(tomlConfig) + + config.Storage.Driver = storeOptions.GraphDriverName + config.Storage.RunRoot = storeOptions.RunRoot + config.Storage.GraphRoot = storeOptions.GraphRoot + for _, i := range storeOptions.GraphDriverOptions { + s := strings.SplitN(i, "=", 2) + if s[0] == "overlay.mount_program" && len(s) == 2 { + config.Storage.Options.MountProgram = s[1] + } + } + + return config +} + +// WriteStorageConfigFile writes the configuration to a file +func WriteStorageConfigFile(storageOpts *stypes.StoreOptions, storageConf string) error { + if err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil { + return err + } + storageFile, err := os.OpenFile(storageConf, os.O_RDWR|os.O_TRUNC, 0600) + if err != nil { + return err + } + tomlConfiguration := getTomlStorage(storageOpts) + defer errorhandling.CloseQuiet(storageFile) + enc := toml.NewEncoder(storageFile) + if err := enc.Encode(tomlConfiguration); err != nil { + if err := os.Remove(storageConf); err != nil { + logrus.Error(err) + } + return err + } + return nil +} + +// ParseInputTime takes the users input and to determine if it is valid and +// returns a time format and error. 
The input is compared against known time formats, then a Unix timestamp,
+// and finally a duration; a duration is applied as an offset from time.Now().
+func ParseInputTime(inputTime string, since bool) (time.Time, error) {
+	timeFormats := []string{time.RFC3339Nano, time.RFC3339, "2006-01-02T15:04:05", "2006-01-02T15:04:05.999999999",
+		"2006-01-02Z07:00", "2006-01-02"}
+	// iterate the supported time formats
+	for _, tf := range timeFormats {
+		t, err := time.Parse(tf, inputTime)
+		if err == nil {
+			return t, nil
+		}
+	}
+
+	unixTimestamp, err := strconv.ParseFloat(inputTime, 64)
+	if err == nil {
+		iPart, fPart := math.Modf(unixTimestamp)
+		return time.Unix(int64(iPart), int64(fPart*1_000_000_000)).UTC(), nil
+	}
+
+	// input might be a duration
+	duration, err := time.ParseDuration(inputTime)
+	if err != nil {
+		return time.Time{}, errors.Errorf("unable to interpret time value")
+	}
+	if since {
+		return time.Now().Add(-duration), nil
+	}
+	return time.Now().Add(duration), nil
+}
+
+// OpenExclusiveFile opens a file for writing and ensures it doesn't already exist
+func OpenExclusiveFile(path string) (*os.File, error) {
+	baseDir := filepath.Dir(path)
+	if baseDir != "" {
+		if _, err := os.Stat(baseDir); err != nil {
+			return nil, err
+		}
+	}
+	return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+}
+
+// ExitCode reads the error message produced when the container process fails
+// to execute and returns 0 if there is no error, 127 if the command does not
+// exist, or 126 for all other errors
+func ExitCode(err error) int {
+	if err == nil {
+		return 0
+	}
+	e := strings.ToLower(err.Error())
+	if strings.Contains(e, "file not found") ||
+		strings.Contains(e, "no such file or directory") {
+		return 127
+	}
+
+	return 126
+}
+
+// HomeDir returns the home directory for the current user.
+func HomeDir() (string, error) {
+	home := os.Getenv("HOME")
+	if home == "" {
+		usr, err := user.LookupId(fmt.Sprintf("%d", rootless.GetRootlessUID()))
+		if err != nil {
+			return "", errors.Wrapf(err, "unable to resolve HOME directory")
+		}
+		home = usr.HomeDir
+	}
+	return home, nil
+}
+
+func Tmpdir() string {
+	tmpdir := os.Getenv("TMPDIR")
+	if tmpdir == "" {
+		tmpdir = "/var/tmp"
+	}
+
+	return tmpdir
+}
+
+// ValidateSysctls validates a list of sysctls and returns them as a map.
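+//
+// An illustrative call (the chosen sysctl is an assumption; "net." is one
+// of the allowed prefixes below):
+//
+//	m, err := ValidateSysctls([]string{"net.ipv4.ip_forward=1"})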
+// ValidateSysctls validates a list of sysctls and returns them as a map.
+func ValidateSysctls(strSlice []string) (map[string]string, error) {
+    sysctl := make(map[string]string)
+    validSysctlMap := map[string]bool{
+        "kernel.msgmax":          true,
+        "kernel.msgmnb":          true,
+        "kernel.msgmni":          true,
+        "kernel.sem":             true,
+        "kernel.shmall":          true,
+        "kernel.shmmax":          true,
+        "kernel.shmmni":          true,
+        "kernel.shm_rmid_forced": true,
+    }
+    validSysctlPrefixes := []string{
+        "net.",
+        "fs.mqueue.",
+    }
+
+    for _, val := range strSlice {
+        foundMatch := false
+        arr := strings.Split(val, "=")
+        if len(arr) < 2 {
+            return nil, errors.Errorf("%s is invalid, sysctl values must be in the form of KEY=VALUE", val)
+        }
+
+        trimmed := fmt.Sprintf("%s=%s", strings.TrimSpace(arr[0]), strings.TrimSpace(arr[1]))
+        if trimmed != val {
+            return nil, errors.Errorf("'%s' is invalid, extra spaces found", val)
+        }
+
+        if validSysctlMap[arr[0]] {
+            sysctl[arr[0]] = arr[1]
+            continue
+        }
+
+        for _, prefix := range validSysctlPrefixes {
+            if strings.HasPrefix(arr[0], prefix) {
+                sysctl[arr[0]] = arr[1]
+                foundMatch = true
+                break
+            }
+        }
+        if !foundMatch {
+            return nil, errors.Errorf("sysctl '%s' is not allowed", arr[0])
+        }
+    }
+    return sysctl, nil
+}
+
+func DefaultContainerConfig() *config.Config {
+    return containerConfig
+}
+
+func CreateCidFile(cidfile string, id string) error {
+    cidFile, err := OpenExclusiveFile(cidfile)
+    if err != nil {
+        if os.IsExist(err) {
+            return errors.Errorf("container id file exists. Ensure another container is not using it or delete %s", cidfile)
+        }
+        return errors.Errorf("opening cidfile %s", cidfile)
+    }
+    if _, err = cidFile.WriteString(id); err != nil {
+        logrus.Error(err)
+    }
+    cidFile.Close()
+    return nil
+}
+
+// DefaultCPUPeriod is the default CPU period (100ms) in microseconds, which is
+// the same default as Kubernetes.
+const DefaultCPUPeriod uint64 = 100000
+
+// CoresToPeriodAndQuota converts a fraction of cores to the equivalent
+// Completely Fair Scheduler (CFS) parameters period and quota.
+//
+// Cores is a fraction of the CFS period that a container may use. Period and
+// Quota are in microseconds.
+func CoresToPeriodAndQuota(cores float64) (uint64, int64) {
+    return DefaultCPUPeriod, int64(cores * float64(DefaultCPUPeriod))
+}
+
+// PeriodAndQuotaToCores takes the CFS parameters period and quota and returns
+// a fraction that represents the limit to the number of cores that can be
+// utilized over the scheduling period.
+//
+// Cores is a fraction of the CFS period that a container may use. Period and
+// Quota are in microseconds.
+func PeriodAndQuotaToCores(period uint64, quota int64) float64 {
+    return float64(quota) / float64(period)
+}
+
+// IDtoolsToRuntimeSpec converts idtools ID mappings to their runtime-spec equivalents.
+func IDtoolsToRuntimeSpec(idMaps []idtools.IDMap) (convertedIDMap []specs.LinuxIDMapping) {
+    for _, idmap := range idMaps {
+        tempIDMap := specs.LinuxIDMapping{
+            ContainerID: uint32(idmap.ContainerID),
+            HostID:      uint32(idmap.HostID),
+            Size:        uint32(idmap.Size),
+        }
+        convertedIDMap = append(convertedIDMap, tempIDMap)
+    }
+    return convertedIDMap
+}
+
+func LookupUser(name string) (*user.User, error) {
+    // Try a UID lookup first; if it fails, look up by username
+    if u, err := user.LookupId(name); err == nil {
+        return u, nil
+    }
+    return user.Lookup(name)
+}
+
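A worked example of the CFS conversion helpers above (illustrative; assumes the vendored podman/v4 util package):

```go
package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/util"
)

func main() {
	period, quota := util.CoresToPeriodAndQuota(0.5)
	fmt.Println(period, quota) // 100000 50000: half a core's runtime per 100ms CFS period

	cores := util.PeriodAndQuotaToCores(100000, 150000)
	fmt.Println(cores) // 1.5: this quota allows 1.5 cores' worth of runtime
}
```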
+// SizeOfPath determines the file usage of a given path. It was called
+// volumeSize in v1; it is now generic and takes a path instead of a libpod
+// volume.
+func SizeOfPath(path string) (uint64, error) {
+    var size uint64
+    err := filepath.WalkDir(path, func(path string, d fs.DirEntry, err error) error {
+        if err == nil && !d.IsDir() {
+            info, err := d.Info()
+            if err != nil {
+                return err
+            }
+            size += uint64(info.Size())
+        }
+        return err
+    })
+    return size, err
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_darwin.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_darwin.go
new file mode 100644
index 00000000000..66ae85e9cc9
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_darwin.go
@@ -0,0 +1,12 @@
+//go:build darwin
+// +build darwin
+
+package util
+
+import (
+    "github.com/pkg/errors"
+)
+
+func GetContainerPidInformationDescriptors() ([]string, error) {
+    return []string{}, errors.New("this function is not supported on darwin")
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go
new file mode 100644
index 00000000000..0b21bf3c537
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go
@@ -0,0 +1,61 @@
+package util
+
+import (
+    "fmt"
+    "io/fs"
+    "os"
+    "path/filepath"
+    "syscall"
+
+    "github.com/containers/psgo"
+    "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
+)
+
+// GetContainerPidInformationDescriptors returns a string slice of all supported
+// format descriptors of GetContainerPidInformation.
+func GetContainerPidInformationDescriptors() ([]string, error) {
+    return psgo.ListDescriptors(), nil
+}
+
+// FindDeviceNodes parses /dev/ into a set of major:minor -> path, where
+// [major:minor] is the device's major and minor numbers formatted as, for
+// example, 2:0 and path is the path to the device node.
+// Symlinks to nodes are ignored.
+func FindDeviceNodes() (map[string]string, error) {
+    nodes := make(map[string]string)
+    err := filepath.WalkDir("/dev", func(path string, d fs.DirEntry, err error) error {
+        if err != nil {
+            logrus.Warnf("Error descending into path %s: %v", path, err)
+            return filepath.SkipDir
+        }
+
+        // If we aren't a device node, do nothing.
+        if d.Type()&(os.ModeDevice|os.ModeCharDevice) == 0 {
+            return nil
+        }
+
+        info, err := d.Info()
+        if err != nil {
+            return err
+        }
+        // We are a device node. Get major/minor.
+ sysstat, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return errors.Errorf("Could not convert stat output for use") + } + // We must typeconvert sysstat.Rdev from uint64->int to avoid constant overflow + rdev := int(sysstat.Rdev) + major := ((rdev >> 8) & 0xfff) | ((rdev >> 32) & ^0xfff) + minor := (rdev & 0xff) | ((rdev >> 12) & ^0xff) + + nodes[fmt.Sprintf("%d:%d", major, minor)] = path + + return nil + }) + if err != nil { + return nil, err + } + + return nodes, nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_supported.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_supported.go new file mode 100644 index 00000000000..50e4b1b7b17 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_supported.go @@ -0,0 +1,121 @@ +//go:build !windows +// +build !windows + +package util + +// TODO once rootless function is consolidated under libpod, we +// should work to take darwin from this + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/containers/podman/v4/pkg/rootless" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// GetRuntimeDir returns the runtime directory +func GetRuntimeDir() (string, error) { + var rootlessRuntimeDirError error + + if !rootless.IsRootless() { + return "", nil + } + + rootlessRuntimeDirOnce.Do(func() { + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + uid := fmt.Sprintf("%d", rootless.GetRootlessUID()) + if runtimeDir == "" { + tmpDir := filepath.Join("/run", "user", uid) + if err := os.MkdirAll(tmpDir, 0700); err != nil { + logrus.Debug(err) + } + st, err := os.Stat(tmpDir) + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && (st.Mode().Perm()&0700 == 0700) { + runtimeDir = tmpDir + } + } + if runtimeDir == "" { + tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%s", uid)) + if err := os.MkdirAll(tmpDir, 0700); err != nil { + logrus.Debug(err) + } + st, err := os.Stat(tmpDir) + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && (st.Mode().Perm()&0700 == 0700) { + runtimeDir = tmpDir + } + } + if runtimeDir == "" { + home := os.Getenv("HOME") + if home == "" { + rootlessRuntimeDirError = fmt.Errorf("neither XDG_RUNTIME_DIR nor HOME was set non-empty") + return + } + resolvedHome, err := filepath.EvalSymlinks(home) + if err != nil { + rootlessRuntimeDirError = errors.Wrapf(err, "cannot resolve %s", home) + return + } + runtimeDir = filepath.Join(resolvedHome, "rundir") + } + rootlessRuntimeDir = runtimeDir + }) + + if rootlessRuntimeDirError != nil { + return "", rootlessRuntimeDirError + } + return rootlessRuntimeDir, nil +} + +// GetRootlessConfigHomeDir returns the config home directory when running as non root +func GetRootlessConfigHomeDir() (string, error) { + var rootlessConfigHomeDirError error + + rootlessConfigHomeDirOnce.Do(func() { + cfgHomeDir := os.Getenv("XDG_CONFIG_HOME") + if cfgHomeDir == "" { + home := os.Getenv("HOME") + resolvedHome, err := filepath.EvalSymlinks(home) + if err != nil { + rootlessConfigHomeDirError = errors.Wrapf(err, "cannot resolve %s", home) + return + } + tmpDir := filepath.Join(resolvedHome, ".config") + st, err := os.Stat(tmpDir) + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() >= 0700 { + cfgHomeDir = tmpDir + } + } + rootlessConfigHomeDir = cfgHomeDir + }) + + if rootlessConfigHomeDirError != nil { + return "", rootlessConfigHomeDirError + } + + return rootlessConfigHomeDir, nil +} + +// GetRootlessPauseProcessPidPath returns the path to 
the file that holds the pid for +// the pause process. +// DEPRECATED - switch to GetRootlessPauseProcessPidPathGivenDir +func GetRootlessPauseProcessPidPath() (string, error) { + runtimeDir, err := GetRuntimeDir() + if err != nil { + return "", err + } + return filepath.Join(runtimeDir, "libpod", "pause.pid"), nil +} + +// GetRootlessPauseProcessPidPathGivenDir returns the path to the file that +// holds the PID of the pause process, given the location of Libpod's temporary +// files. +func GetRootlessPauseProcessPidPathGivenDir(libpodTmpDir string) (string, error) { + if libpodTmpDir == "" { + return "", errors.Errorf("must provide non-empty temporary directory") + } + return filepath.Join(libpodTmpDir, "pause.pid"), nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_unsupported.go new file mode 100644 index 00000000000..8963464933d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_unsupported.go @@ -0,0 +1,13 @@ +//go:build darwin || windows +// +build darwin windows + +package util + +import ( + "github.com/pkg/errors" +) + +// FindDeviceNodes is not implemented anywhere except Linux. +func FindDeviceNodes() (map[string]string, error) { + return nil, errors.Errorf("not supported on non-Linux OSes") +} diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_windows.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_windows.go new file mode 100644 index 00000000000..b91680f7a4b --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_windows.go @@ -0,0 +1,51 @@ +//go:build windows +// +build windows + +package util + +import ( + "path/filepath" + + "github.com/containers/storage/pkg/homedir" + "github.com/pkg/errors" +) + +var errNotImplemented = errors.New("not yet implemented") + +// IsCgroup2UnifiedMode returns whether we are running in cgroup 2 unified mode. +func IsCgroup2UnifiedMode() (bool, error) { + return false, errors.Wrap(errNotImplemented, "IsCgroup2Unified") +} + +// GetContainerPidInformationDescriptors returns a string slice of all supported +// format descriptors of GetContainerPidInformation. 
+func GetContainerPidInformationDescriptors() ([]string, error) {
+    return nil, errors.Wrap(errNotImplemented, "GetContainerPidInformationDescriptors")
+}
+
+// GetRootlessPauseProcessPidPath returns the path to the file that holds the pid for
+// the pause process
+func GetRootlessPauseProcessPidPath() (string, error) {
+    return "", errors.Wrap(errNotImplemented, "GetRootlessPauseProcessPidPath")
+}
+
+// GetRootlessPauseProcessPidPathGivenDir returns the path to the file that holds
+// the pid for the pause process, given the location of Libpod's temporary files
+func GetRootlessPauseProcessPidPathGivenDir(unused string) (string, error) {
+    return "", errors.Wrap(errNotImplemented, "GetRootlessPauseProcessPidPathGivenDir")
+}
+
+// GetRuntimeDir returns the runtime directory
+func GetRuntimeDir() (string, error) {
+    data, err := homedir.GetDataHome()
+    if err != nil {
+        return "", err
+    }
+    runtimeDir := filepath.Join(data, "containers", "podman")
+    return runtimeDir, nil
+}
+
+// GetRootlessConfigHomeDir returns the config home directory when running as non-root
+func GetRootlessConfigHomeDir() (string, error) {
+    return "", errors.New("this function is not implemented for windows")
+}
diff --git a/vendor/github.com/containers/podman/v4/utils/ports.go b/vendor/github.com/containers/podman/v4/utils/ports.go
new file mode 100644
index 00000000000..57a6f82759e
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/utils/ports.go
@@ -0,0 +1,26 @@
+package utils
+
+import (
+    "net"
+    "strconv"
+
+    "github.com/pkg/errors"
+)
+
+// GetRandomPort finds a random, open port on the host.
+func GetRandomPort() (int, error) {
+    l, err := net.Listen("tcp", ":0")
+    if err != nil {
+        return 0, errors.Wrapf(err, "unable to get free TCP port")
+    }
+    defer l.Close()
+    _, randomPort, err := net.SplitHostPort(l.Addr().String())
+    if err != nil {
+        return 0, errors.Wrapf(err, "unable to determine free port")
+    }
+    rp, err := strconv.Atoi(randomPort)
+    if err != nil {
+        return 0, errors.Wrapf(err, "unable to convert random port to int")
+    }
+    return rp, nil
+}
diff --git a/vendor/github.com/containers/podman/v4/utils/utils.go b/vendor/github.com/containers/podman/v4/utils/utils.go
new file mode 100644
index 00000000000..fd66ac2ed37
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/utils/utils.go
@@ -0,0 +1,269 @@
+package utils
+
+import (
+    "bytes"
+    "crypto/rand"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "os"
+    "os/exec"
+    "strconv"
+    "strings"
+    "sync"
+
+    "github.com/containers/common/pkg/cgroups"
+    "github.com/containers/podman/v4/libpod/define"
+    "github.com/containers/storage/pkg/archive"
+    "github.com/godbus/dbus/v5"
+    "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
+)
+
+// ExecCmd executes a command with args and returns its output as a string along
+// with an error, if any.
+func ExecCmd(name string, args ...string) (string, error) {
+    cmd := exec.Command(name, args...)
+    var stdout bytes.Buffer
+    var stderr bytes.Buffer
+    cmd.Stdout = &stdout
+    cmd.Stderr = &stderr
+
+    err := cmd.Run()
+    if err != nil {
+        return "", fmt.Errorf("`%v %v` failed: %v %v (%v)", name, strings.Join(args, " "), stderr.String(), stdout.String(), err)
+    }
+
+    return stdout.String(), nil
+}
+
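A short usage sketch of ExecCmd (illustrative; assumes the vendored podman/v4 utils package):

```go
package main

import (
	"fmt"

	"github.com/containers/podman/v4/utils"
)

func main() {
	out, err := utils.ExecCmd("echo", "hello")
	if err != nil {
		// On failure the returned error already embeds stderr, stdout,
		// and the exit status, so no extra capture is needed.
		panic(err)
	}
	fmt.Printf("%q\n", out) // "hello\n"
}
```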
+// ExecCmdWithStdStreams executes a command with the specified standard streams.
+func ExecCmdWithStdStreams(stdin io.Reader, stdout, stderr io.Writer, env []string, name string, args ...string) error {
+    cmd := exec.Command(name, args...)
+    cmd.Stdin = stdin
+    cmd.Stdout = stdout
+    cmd.Stderr = stderr
+    cmd.Env = env
+
+    err := cmd.Run()
+    if err != nil {
+        return fmt.Errorf("`%v %v` failed: %v", name, strings.Join(args, " "), err)
+    }
+
+    return nil
+}
+
+// ErrDetach is an error indicating that the user manually detached from the
+// container.
+var ErrDetach = define.ErrDetach
+
+// CopyDetachable is similar to io.Copy but supports a detach key sequence to break out.
+func CopyDetachable(dst io.Writer, src io.Reader, keys []byte) (written int64, err error) {
+    buf := make([]byte, 32*1024)
+    for {
+        nr, er := src.Read(buf)
+        if nr > 0 {
+            preservBuf := []byte{}
+            for i, key := range keys {
+                preservBuf = append(preservBuf, buf[0:nr]...)
+                if nr != 1 || buf[0] != key {
+                    break
+                }
+                if i == len(keys)-1 {
+                    return 0, ErrDetach
+                }
+                nr, er = src.Read(buf)
+            }
+            var nw int
+            var ew error
+            if len(preservBuf) > 0 {
+                nw, ew = dst.Write(preservBuf)
+                nr = len(preservBuf)
+            } else {
+                nw, ew = dst.Write(buf[0:nr])
+            }
+            if nw > 0 {
+                written += int64(nw)
+            }
+            if ew != nil {
+                err = ew
+                break
+            }
+            if nr != nw {
+                err = io.ErrShortWrite
+                break
+            }
+        }
+        if er != nil {
+            if er != io.EOF {
+                err = er
+            }
+            break
+        }
+    }
+    return written, err
+}
+
+// UntarToFileSystem untars a tarball (an os.File) to a destination in the filesystem
+func UntarToFileSystem(dest string, tarball *os.File, options *archive.TarOptions) error {
+    logrus.Debugf("untarring %s", tarball.Name())
+    return archive.Untar(tarball, dest, options)
+}
+
+// CreateTarFromSrc creates a new tar file and writes bytes to it from an io.ReadCloser
+func CreateTarFromSrc(source string, dest string) error {
+    file, err := os.Create(dest)
+    if err != nil {
+        return errors.Wrapf(err, "Could not create tarball file '%s'", dest)
+    }
+    defer file.Close()
+    return TarToFilesystem(source, file)
+}
+
+// TarToFilesystem creates a tarball from source and writes to an os.file
+// provided
+func TarToFilesystem(source string, tarball *os.File) error {
+    tb, err := Tar(source)
+    if err != nil {
+        return err
+    }
+    _, err = io.Copy(tarball, tb)
+    if err != nil {
+        return err
+    }
+    logrus.Debugf("wrote tarball file %s", tarball.Name())
+    return nil
+}
+
+// Tar creates a tarball from source and returns a readcloser of it
+func Tar(source string) (io.ReadCloser, error) {
+    logrus.Debugf("creating tarball of %s", source)
+    return archive.Tar(source, archive.Uncompressed)
+}
+
+// RemoveScientificNotationFromFloat returns a float without any
+// scientific notation if the number has any.
+// Go does not handle conversion of float64s that contain scientific
+// notation well; please replace this if you have
+// a better implementation.
+func RemoveScientificNotationFromFloat(x float64) (float64, error) { + bigNum := strconv.FormatFloat(x, 'g', -1, 64) + breakPoint := strings.IndexAny(bigNum, "Ee") + if breakPoint > 0 { + bigNum = bigNum[:breakPoint] + } + result, err := strconv.ParseFloat(bigNum, 64) + if err != nil { + return x, errors.Wrapf(err, "unable to remove scientific number from calculations") + } + return result, nil +} + +var ( + runsOnSystemdOnce sync.Once + runsOnSystemd bool +) + +// RunsOnSystemd returns whether the system is using systemd +func RunsOnSystemd() bool { + runsOnSystemdOnce.Do(func() { + initCommand, err := ioutil.ReadFile("/proc/1/comm") + // On errors, default to systemd + runsOnSystemd = err != nil || strings.TrimRight(string(initCommand), "\n") == "systemd" + }) + return runsOnSystemd +} + +func moveProcessPIDFileToScope(pidPath, slice, scope string) error { + data, err := ioutil.ReadFile(pidPath) + if err != nil { + // do not raise an error if the file doesn't exist + if os.IsNotExist(err) { + return nil + } + return errors.Wrapf(err, "cannot read pid file %s", pidPath) + } + pid, err := strconv.ParseUint(string(data), 10, 0) + if err != nil { + return errors.Wrapf(err, "cannot parse pid file %s", pidPath) + } + + return moveProcessToScope(int(pid), slice, scope) +} + +func moveProcessToScope(pid int, slice, scope string) error { + err := RunUnderSystemdScope(pid, slice, scope) + // If the PID is not valid anymore, do not return an error. + if dbusErr, ok := err.(dbus.Error); ok { + if dbusErr.Name == "org.freedesktop.DBus.Error.UnixProcessIdUnknown" { + return nil + } + } + return err +} + +// MoveRootlessNetnsSlirpProcessToUserSlice moves the slirp4netns process for the rootless netns +// into a different scope so that systemd does not kill it with a container. +func MoveRootlessNetnsSlirpProcessToUserSlice(pid int) error { + randBytes := make([]byte, 4) + _, err := rand.Read(randBytes) + if err != nil { + return err + } + return moveProcessToScope(pid, "user.slice", fmt.Sprintf("rootless-netns-%x.scope", randBytes)) +} + +// MovePauseProcessToScope moves the pause process used for rootless mode to keep the namespaces alive to +// a separate scope. +func MovePauseProcessToScope(pausePidPath string) { + var err error + + for i := 0; i < 10; i++ { + randBytes := make([]byte, 4) + _, err = rand.Read(randBytes) + if err != nil { + logrus.Errorf("failed to read random bytes: %v", err) + continue + } + err = moveProcessPIDFileToScope(pausePidPath, "user.slice", fmt.Sprintf("podman-pause-%x.scope", randBytes)) + if err == nil { + return + } + } + + if err != nil { + unified, err2 := cgroups.IsCgroup2UnifiedMode() + if err2 != nil { + logrus.Warnf("Failed to detect if running with cgroup unified: %v", err) + } + if RunsOnSystemd() && unified { + logrus.Warnf("Failed to add pause process to systemd sandbox cgroup: %v", err) + } else { + logrus.Debugf("Failed to add pause process to systemd sandbox cgroup: %v", err) + } + } +} + +// CreateSCPCommand takes an existing command, appends the given arguments and returns a configured podman command for image scp +func CreateSCPCommand(cmd *exec.Cmd, command []string) *exec.Cmd { + cmd.Args = append(cmd.Args, command...) 
+ cmd.Env = os.Environ() + cmd.Stderr = os.Stderr + cmd.Stdout = os.Stdout + return cmd +} + +// LoginUser starts the user process on the host so that image scp can use systemd-run +func LoginUser(user string) (*exec.Cmd, error) { + sleep, err := exec.LookPath("sleep") + if err != nil { + return nil, err + } + machinectl, err := exec.LookPath("machinectl") + if err != nil { + return nil, err + } + cmd := exec.Command(machinectl, "shell", "-q", user+"@.host", sleep, "inf") + err = cmd.Start() + return cmd, err +} diff --git a/vendor/github.com/containers/podman/v4/utils/utils_supported.go b/vendor/github.com/containers/podman/v4/utils/utils_supported.go new file mode 100644 index 00000000000..c2dcc463152 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/utils/utils_supported.go @@ -0,0 +1,207 @@ +//go:build linux || darwin +// +build linux darwin + +package utils + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/containers/common/pkg/cgroups" + "github.com/containers/podman/v4/pkg/rootless" + systemdDbus "github.com/coreos/go-systemd/v22/dbus" + "github.com/godbus/dbus/v5" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// RunUnderSystemdScope adds the specified pid to a systemd scope +func RunUnderSystemdScope(pid int, slice string, unitName string) error { + var properties []systemdDbus.Property + var conn *systemdDbus.Conn + var err error + + if rootless.IsRootless() { + conn, err = cgroups.GetUserConnection(rootless.GetRootlessUID()) + if err != nil { + return err + } + } else { + conn, err = systemdDbus.NewWithContext(context.Background()) + if err != nil { + return err + } + } + defer conn.Close() + properties = append(properties, systemdDbus.PropSlice(slice)) + properties = append(properties, newProp("PIDs", []uint32{uint32(pid)})) + properties = append(properties, newProp("Delegate", true)) + properties = append(properties, newProp("DefaultDependencies", false)) + ch := make(chan string) + _, err = conn.StartTransientUnitContext(context.Background(), unitName, "replace", properties, ch) + if err != nil { + // On errors check if the cgroup already exists, if it does move the process there + if props, err := conn.GetUnitTypePropertiesContext(context.Background(), unitName, "Scope"); err == nil { + if cgroup, ok := props["ControlGroup"].(string); ok && cgroup != "" { + if err := moveUnderCgroup(cgroup, "", []uint32{uint32(pid)}); err == nil { + return nil + } + // On errors return the original error message we got from StartTransientUnit. + } + } + return err + } + + // Block until job is started + <-ch + + return nil +} + +func getCgroupProcess(procFile string, allowRoot bool) (string, error) { + f, err := os.Open(procFile) + if err != nil { + return "", err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + cgroup := "" + for scanner.Scan() { + line := scanner.Text() + parts := strings.SplitN(line, ":", 3) + if len(parts) != 3 { + return "", errors.Errorf("cannot parse cgroup line %q", line) + } + if strings.HasPrefix(line, "0::") { + cgroup = line[3:] + break + } + if len(parts[2]) > len(cgroup) { + cgroup = parts[2] + } + } + if len(cgroup) == 0 || (!allowRoot && cgroup == "/") { + return "", errors.Errorf("could not find cgroup mount in %q", procFile) + } + return cgroup, nil +} + +// GetOwnCgroup returns the cgroup for the current process. 
+func GetOwnCgroup() (string, error) {
+    return getCgroupProcess("/proc/self/cgroup", true)
+}
+
+func GetOwnCgroupDisallowRoot() (string, error) {
+    return getCgroupProcess("/proc/self/cgroup", false)
+}
+
+// GetCgroupProcess returns the cgroup for the specified process.
+func GetCgroupProcess(pid int) (string, error) {
+    return getCgroupProcess(fmt.Sprintf("/proc/%d/cgroup", pid), true)
+}
+
+// MoveUnderCgroupSubtree moves the PID under a cgroup subtree.
+func MoveUnderCgroupSubtree(subtree string) error {
+    return moveUnderCgroup("", subtree, nil)
+}
+
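For reference, the /proc/&lt;pid&gt;/cgroup lines that getCgroupProcess above and moveUnderCgroup below parse look like this (an editorial sketch with made-up paths):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Each line is hierarchy-ID:controller-list:cgroup-path, e.g.
	//   0::/user.slice/user-1000.slice/session-2.scope   (cgroup v2, unified)
	//   7:memory:/user.slice                             (cgroup v1, hybrid)
	// The "0::" entry wins on v2; otherwise the longest controller path is kept.
	line := "0::/user.slice/user-1000.slice/session-2.scope"
	parts := strings.SplitN(line, ":", 3)
	fmt.Println(parts[2]) // /user.slice/user-1000.slice/session-2.scope
}
```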
+// moveUnderCgroup moves a group of processes to a new cgroup.
+// If cgroup is the empty string, then the current calling process cgroup is used.
+// If processes is empty, then the processes from the current cgroup are moved.
+func moveUnderCgroup(cgroup, subtree string, processes []uint32) error {
+    procFile := "/proc/self/cgroup"
+    f, err := os.Open(procFile)
+    if err != nil {
+        return err
+    }
+    defer f.Close()
+
+    unifiedMode, err := cgroups.IsCgroup2UnifiedMode()
+    if err != nil {
+        return err
+    }
+
+    scanner := bufio.NewScanner(f)
+    for scanner.Scan() {
+        line := scanner.Text()
+        parts := strings.SplitN(line, ":", 3)
+        if len(parts) != 3 {
+            return errors.Errorf("cannot parse cgroup line %q", line)
+        }
+
+        // root cgroup, skip it
+        if parts[2] == "/" {
+            continue
+        }
+
+        cgroupRoot := "/sys/fs/cgroup"
+        // Special case the unified mount on hybrid cgroup and named hierarchies.
+        // This works on Fedora 31, but we should really parse the mounts to see
+        // where the cgroup hierarchy is mounted.
+        if parts[1] == "" && !unifiedMode {
+            // If it is not using unified mode, the cgroup v2 hierarchy is
+            // usually mounted under /sys/fs/cgroup/unified
+            cgroupRoot = filepath.Join(cgroupRoot, "unified")
+
+            // Ignore the unified mount if it doesn't exist
+            if _, err := os.Stat(cgroupRoot); err != nil && os.IsNotExist(err) {
+                continue
+            }
+        } else if parts[1] != "" {
+            // Assume the controller is mounted at /sys/fs/cgroup/$CONTROLLER.
+            controller := strings.TrimPrefix(parts[1], "name=")
+            cgroupRoot = filepath.Join(cgroupRoot, controller)
+        }
+
+        parentCgroup := cgroup
+        if parentCgroup == "" {
+            parentCgroup = parts[2]
+        }
+        newCgroup := filepath.Join(cgroupRoot, parentCgroup, subtree)
+        if err := os.MkdirAll(newCgroup, 0755); err != nil && !os.IsExist(err) {
+            return err
+        }
+
+        f, err := os.OpenFile(filepath.Join(newCgroup, "cgroup.procs"), os.O_RDWR, 0755)
+        if err != nil {
+            return err
+        }
+        defer f.Close()
+
+        if len(processes) > 0 {
+            for _, pid := range processes {
+                if _, err := f.Write([]byte(fmt.Sprintf("%d\n", pid))); err != nil {
+                    logrus.Debugf("Cannot move process %d to cgroup %q: %v", pid, newCgroup, err)
+                }
+            }
+        } else {
+            processesData, err := ioutil.ReadFile(filepath.Join(cgroupRoot, parts[2], "cgroup.procs"))
+            if err != nil {
+                return err
+            }
+            for _, pid := range bytes.Split(processesData, []byte("\n")) {
+                if len(pid) == 0 {
+                    continue
+                }
+                if _, err := f.Write(pid); err != nil {
+                    logrus.Debugf("Cannot move process %s to cgroup %q: %v", string(pid), newCgroup, err)
+                }
+            }
+        }
+    }
+    return nil
+}
+
+func newProp(name string, units interface{}) systemdDbus.Property {
+    return systemdDbus.Property{
+        Name:  name,
+        Value: dbus.MakeVariant(units),
+    }
+}
diff --git a/vendor/github.com/containers/podman/v4/utils/utils_windows.go b/vendor/github.com/containers/podman/v4/utils/utils_windows.go
new file mode 100644
index 00000000000..1d017f5ae7e
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/utils/utils_windows.go
@@ -0,0 +1,26 @@
+//go:build windows
+// +build windows
+
+package utils
+
+import "github.com/pkg/errors"
+
+func RunUnderSystemdScope(pid int, slice string, unitName string) error {
+    return errors.New("not implemented for windows")
+}
+
+func MoveUnderCgroupSubtree(subtree string) error {
+    return errors.New("not implemented for windows")
+}
+
+func GetOwnCgroup() (string, error) {
+    return "", errors.New("not implemented for windows")
+}
+
+func GetOwnCgroupDisallowRoot() (string, error) {
+    return "", errors.New("not implemented for windows")
+}
+
+func GetCgroupProcess(pid int) (string, error) {
+    return "", errors.New("not implemented for windows")
+}
diff --git a/vendor/github.com/containers/podman/v4/version/version.go b/vendor/github.com/containers/podman/v4/version/version.go
new file mode 100644
index 00000000000..32f1ee0e762
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/version/version.go
@@ -0,0 +1,47 @@
+package version
+
+import (
+    "github.com/blang/semver"
+)
+
+type (
+    // Tree determines which API endpoint tree a version applies to
+    Tree int
+    // Level determines which API level: the current one or an older supported one
+    Level int
+)
+
+const (
+    // Libpod supports Libpod endpoints
+    Libpod = Tree(iota)
+    // Compat supports Docker-compatible endpoints
+    Compat
+
+    // CurrentAPI announces what is the current API level
+    CurrentAPI = Level(iota)
+    // MinimalAPI announces what is the oldest API level supported
+    MinimalAPI
+)
+
+// Version is the version of the build.
+// NOTE: remember to bump the version at the top
+// of the top-level README.md file when this is
+// bumped.
+var Version = semver.MustParse("4.1.1") + +// See https://docs.docker.com/engine/api/v1.40/ +// libpod compat handlers are expected to honor docker API versions + +// APIVersion provides the current and minimal API versions for compat and libpod endpoint trees +// Note: GET|HEAD /_ping is never versioned and provides the API-Version and Libpod-API-Version headers to allow +// clients to shop for the Version they wish to support +var APIVersion = map[Tree]map[Level]semver.Version{ + Libpod: { + CurrentAPI: Version, + MinimalAPI: semver.MustParse("4.0.0"), + }, + Compat: { + CurrentAPI: semver.MustParse("1.40.0"), + MinimalAPI: semver.MustParse("1.24.0"), + }, +} diff --git a/vendor/github.com/containers/psgo/.codespellrc b/vendor/github.com/containers/psgo/.codespellrc new file mode 100644 index 00000000000..604bc21da43 --- /dev/null +++ b/vendor/github.com/containers/psgo/.codespellrc @@ -0,0 +1,2 @@ +[codespell] +skip = ./vendor,./.git diff --git a/vendor/github.com/containers/psgo/.gitignore b/vendor/github.com/containers/psgo/.gitignore new file mode 100644 index 00000000000..fc8fbcad3da --- /dev/null +++ b/vendor/github.com/containers/psgo/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# everything in build dir +bin/* diff --git a/vendor/github.com/containers/psgo/.golangci.yml b/vendor/github.com/containers/psgo/.golangci.yml new file mode 100644 index 00000000000..a098068fe93 --- /dev/null +++ b/vendor/github.com/containers/psgo/.golangci.yml @@ -0,0 +1,6 @@ +# For documentation, see https://golangci-lint.run/usage/configuration/ + +linters: + enable: + - errorlint + - gofumpt diff --git a/vendor/github.com/containers/psgo/CODE-OF-CONDUCT.md b/vendor/github.com/containers/psgo/CODE-OF-CONDUCT.md new file mode 100644 index 00000000000..1081553f81d --- /dev/null +++ b/vendor/github.com/containers/psgo/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## The psgo Project Community Code of Conduct + +The psgo project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md). diff --git a/vendor/github.com/containers/psgo/LICENSE b/vendor/github.com/containers/psgo/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/containers/psgo/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/containers/psgo/Makefile b/vendor/github.com/containers/psgo/Makefile
new file mode 100644
index 00000000000..fb6126e7c66
--- /dev/null
+++ b/vendor/github.com/containers/psgo/Makefile
@@ -0,0 +1,55 @@
+SHELL= /bin/bash
+GO ?= go
+BUILD_DIR := ./bin
+BIN_DIR := /usr/local/bin
+NAME := psgo
+BATS_TESTS := *.bats
+
+# Not all platforms support -buildmode=pie, plus it's incompatible with -race.
+ifeq ($(shell $(GO) env GOOS),linux)
+    ifeq (,$(filter $(shell $(GO) env GOARCH),mips mipsle mips64 mips64le ppc64 riscv64))
+        ifeq (,$(findstring -race,$(EXTRA_BUILD_FLAGS)))
+            GO_BUILDMODE := "-buildmode=pie"
+        endif
+    endif
+endif
+GO_BUILD := $(GO) build $(GO_BUILDMODE)
+
+all: validate build
+
+.PHONY: build
+build:
+    $(GO_BUILD) $(EXTRA_BUILD_FLAGS) -o $(BUILD_DIR)/$(NAME) ./sample
+
+.PHONY: clean
+clean:
+    rm -rf $(BUILD_DIR)
+
+.PHONY: vendor
+vendor:
+    go mod tidy
+    go mod vendor
+    go mod verify
+
+.PHONY: validate
+validate:
+    golangci-lint run
+
+.PHONY: test
+test: test-unit test-integration
+
+.PHONY: test-integration
+test-integration:
+    bats test/$(BATS_TESTS)
+
+.PHONY: test-unit
+test-unit:
+    $(GO) test -v $(EXTRA_TEST_FLAGS) ./...
+
+.PHONY: install
+install:
+    sudo install -D -m755 $(BUILD_DIR)/$(NAME) $(BIN_DIR)
+
+.PHONY: uninstall
+uninstall:
+    sudo rm $(BIN_DIR)/$(NAME)
diff --git a/vendor/github.com/containers/psgo/README.md b/vendor/github.com/containers/psgo/README.md
new file mode 100644
index 00000000000..684c80a0c08
--- /dev/null
+++ b/vendor/github.com/containers/psgo/README.md
@@ -0,0 +1,104 @@
+[![GoDoc](https://godoc.org/github.com/containers/psgo?status.svg)](https://godoc.org/github.com/containers/psgo) [![Build Status](https://travis-ci.org/containers/psgo.svg?branch=master)](https://travis-ci.org/containers/psgo)
+
+# psgo
+A ps(1) AIX-format compatible golang library extended with various descriptors useful for displaying container-related data.
+
+The idea behind the library is to provide an easy-to-use way of extracting process-related data, just as ps(1) does. The problem when using ps(1) is that the ps format strings split columns with whitespace, making the output nearly impossible to parse. It also adds some jitter as we have to fork and execute ps either in the container or filter the output afterwards, further limiting applicability.
+
+This library aims to make things a bit more comfortable, especially for container runtimes, as the API can join the mount namespace of a given process and parse `/proc` and `/dev/` from there. The API consists of the following functions:
+
+ - `psgo.ProcessInfo(descriptors []string) ([][]string, error)`
+   - ProcessInfo returns the process information of all processes in the current mount namespace. The input descriptors must be a slice of supported AIX format descriptors in the normal form or in the code form, if supported. If the input descriptor slice is empty, the `psgo.DefaultDescriptors` are used. The return value contains the string slice of process data, one per process.
+
+ - `psgo.ProcessInfoByPids(pids []string, descriptors []string) ([][]string, error)`
+   - ProcessInfoByPids is similar to `psgo.ProcessInfo`, but limits the return value to a list of specified pids. The pids input must be a slice of PIDs for which process information should be returned. If the input descriptor slice is empty, only the format descriptor headers are returned.
+
+ - `psgo.JoinNamespaceAndProcessInfo(pid string, descriptors []string) ([][]string, error)`
+   - JoinNamespaceAndProcessInfo has the same semantics as ProcessInfo but joins the mount namespace of the specified pid before extracting data from /proc. This way, we can extract the `/proc` data from a container without executing any command inside the container.
+
+ - `psgo.JoinNamespaceAndProcessInfoByPids(pids []string, descriptors []string) ([][]string, error)`
+   - JoinNamespaceAndProcessInfoByPids is similar to `psgo.JoinNamespaceAndProcessInfo` but takes a slice of pids as an argument. To avoid duplicate entries (e.g., when two or more containers share the same PID namespace), a given PID namespace will be joined only once.
+
+ - `psgo.ListDescriptors() []string`
+   - ListDescriptors returns a sorted string slice of all supported AIX format descriptors in the normal form (e.g., "args,comm,user"). It can be useful in the context of bash-completion, help messages, etc.
+
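For Go consumers, the functions above compose as follows (a minimal sketch, assuming the psgo module is on the import path; error handling shortened):

```go
package main

import (
	"fmt"

	"github.com/containers/psgo"
)

func main() {
	// An empty descriptor slice would fall back to psgo.DefaultDescriptors.
	data, err := psgo.ProcessInfo([]string{"pid", "user", "args"})
	if err != nil {
		panic(err)
	}
	for _, row := range data {
		fmt.Println(row) // header row first, then one row per process
	}
}
```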
+### Listing processes
+We can use the [psgo](https://github.com/containers/psgo/blob/master/sample/sample.go) sample tool from this project to test the core components of this library. First, let's build `psgo` via `make build`. The binary is now located under `./bin/psgo`. By default `psgo` displays data about all running processes in the current mount namespace, similar to the output of `ps -ef`.
+
+```
+$ ./bin/psgo | head -n5
+USER PID PPID %CPU ELAPSED TTY TIME COMMAND
+root 1 0 0.064 6h3m27.677997443s ? 13.98s systemd
+root 2 0 0.000 6h3m27.678380128s ? 20ms [kthreadd]
+root 4 2 0.000 6h3m27.678701852s ? 0s [kworker/0:0H]
+root 6 2 0.000 6h3m27.678999508s ? 0s [mm_percpu_wq]
+```
+
+### Listing selected processes
+You can use the `--pids` flag to restrict `psgo` output to a subset of processes. This option accepts a list of comma-separated process IDs and will return exactly the same kind of information per process as the default output.
+
+```
+$ ./bin/psgo --pids 1,$(pgrep bash | tr "\n" ",")
+USER PID PPID %CPU ELAPSED TTY TIME COMMAND
+root 1 0 0.009 128h52m44.193475932s ? 40s systemd
+root 20830 20827 0.000 105h2m44.19579679s pts/5 0s bash
+root 25843 25840 0.000 102h56m4.196072027s pts/6 0s bash
+```
+
+### Listing processes within a container
+Let's have a look at how we can use this library in the context of containers. As a simple show case, we'll start a Docker container, extract the process ID via `docker inspect` and run the `psgo` binary to extract the data of running processes within that container.
+
+```shell
+$ docker run -d alpine sleep 100
+473c9a05d4223b88ef7f5a9ac11e3d21e9914e012338425cc1cef853fc6c32a2
+
+$ docker inspect --format '{{.State.Pid}}' 473c9
+5572
+
+$ sudo ./bin/psgo -pids 5572 -join
+USER PID PPID %CPU ELAPSED TTY TIME COMMAND
+root 1 0 0.000 17.249905587s ? 0s sleep
+```
+
+### Format descriptors
+The ps library is compatible with all AIX format descriptors of the ps command-line utility (see `man 1 ps` for details) but it also supports some additional descriptors that can be useful when seeking specific process-related information.
+
+- **capamb**
+  - Set of ambient capabilities. See capabilities(7) for more information.
+- **capbnd**
+  - Set of bounding capabilities. See capabilities(7) for more information.
+- **capeff**
+  - Set of effective capabilities. See capabilities(7) for more information.
+- **capinh**
+  - Set of inheritable capabilities. See capabilities(7) for more information.
+- **capprm**
+  - Set of permitted capabilities. See capabilities(7) for more information.
+- **groups**
+  - Supplementary groups inside the container.
+- **hgroup**
+  - The corresponding effective group of a container process on the host.
+- **hgroups**
+  - Supplementary groups on the host.
+- **hpid**
+  - The corresponding host PID of a container process.
+- **huser**
+  - The corresponding effective user of a container process on the host.
+- **label**
+  - Current security attributes of the process.
+- **seccomp**
+  - Seccomp mode of the process (i.e., disabled, strict or filter). See seccomp(2) for more information.
+- **state**
+  - Process state codes (e.g., **R** for *running*, **S** for *sleeping*). See proc(5) for more information.
+- **stime**
+  - Process start time (e.g., "2019-12-09 10:50:36 +0100 CET").
+
+We can try out different format descriptors with the psgo binary:
+
+```shell
+$ ./bin/psgo -format "pid, user, group, seccomp" | head -n5
+PID USER GROUP SECCOMP
+1 root root disabled
+2 root root disabled
+4 root root disabled
+6 root root disabled
+```
diff --git a/vendor/github.com/containers/psgo/SECURITY.md b/vendor/github.com/containers/psgo/SECURITY.md
new file mode 100644
index 00000000000..5d5ba254acd
--- /dev/null
+++ b/vendor/github.com/containers/psgo/SECURITY.md
@@ -0,0 +1,3 @@
+## Security and Disclosure Information Policy for the psgo Project
+
+The psgo Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
diff --git a/vendor/github.com/containers/psgo/internal/capabilities/capabilities.go b/vendor/github.com/containers/psgo/internal/capabilities/capabilities.go
new file mode 100644
index 00000000000..1a60b96c418
--- /dev/null
+++ b/vendor/github.com/containers/psgo/internal/capabilities/capabilities.go
@@ -0,0 +1,89 @@
+// Copyright 2018 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package capabilities provides a mapping from common kernel bit masks to the
+// alphanumerical representation of kernel capabilities. See capabilities(7)
+// for additional information.
+package capabilities
+
+var (
+    // capabilities are a mapping from a numerical value to the textual
+    // representation of a given capability. A map makes it easy to check
+    // whether a given value is included or not.
+    //
+    // NOTE: this map must be maintained and kept in sync with the
+    // ./include/uapi/linux/capability.h kernel header.
+    capabilities = map[uint]string{
+        0:  "CHOWN",
+        1:  "DAC_OVERRIDE",
+        2:  "DAC_READ_SEARCH",
+        3:  "FOWNER",
+        4:  "FSETID",
+        5:  "KILL",
+        6:  "SETGID",
+        7:  "SETUID",
+        8:  "SETPCAP",
+        9:  "LINUX_IMMUTABLE",
+        10: "NET_BIND_SERVICE",
+        11: "NET_BROADCAST",
+        12: "NET_ADMIN",
+        13: "NET_RAW",
+        14: "IPC_LOCK",
+        15: "IPC_OWNER",
+        16: "SYS_MODULE",
+        17: "SYS_RAWIO",
+        18: "SYS_CHROOT",
+        19: "SYS_PTRACE",
+        20: "SYS_PACCT",
+        21: "SYS_ADMIN",
+        22: "SYS_BOOT",
+        23: "SYS_NICE",
+        24: "SYS_RESOURCE",
+        25: "SYS_TIME",
+        26: "SYS_TTY_CONFIG",
+        27: "MKNOD",
+        28: "LEASE",
+        29: "AUDIT_WRITE",
+        30: "AUDIT_CONTROL",
+        31: "SETFCAP",
+        32: "MAC_OVERRIDE",
+        33: "MAC_ADMIN",
+        34: "SYSLOG",
+        35: "WAKE_ALARM",
+        36: "BLOCK_SUSPEND",
+        37: "AUDIT_READ",
+    }
+
+    // FullCAPs represents the value of a bitmask with a full capability
+    // set.
+    FullCAPs = uint64(0x3FFFFFFFFF)
+)
+
+// TranslateMask iterates over mask and returns a slice of corresponding
+// capabilities. If a bit is out of range of known capabilities, it is set as
+// "unknown" to catch potential regressions when new capabilities are added to
+// the kernel.
+func TranslateMask(mask uint64) []string {
+    caps := []string{}
+    for i := uint(0); i < 64; i++ {
+        if (mask>>i)&0x1 == 1 {
+            c, known := capabilities[i]
+            if !known {
+                c = "unknown"
+            }
+            caps = append(caps, c)
+        }
+    }
+    return caps
+}
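A quick illustration of TranslateMask (a fragment only; the package is internal to psgo, so this compiles only within the psgo module itself):

```go
// Bits 0 and 1 of the mask correspond to the two lowest capabilities.
caps := capabilities.TranslateMask(0x3)
// caps == []string{"CHOWN", "DAC_OVERRIDE"}

// FullCAPs sets bits 0 through 37, i.e. every entry in the table above.
all := capabilities.TranslateMask(capabilities.FullCAPs)
_ = all
```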
diff --git a/vendor/github.com/containers/psgo/internal/cgroups/cgroups.go b/vendor/github.com/containers/psgo/internal/cgroups/cgroups.go
new file mode 100644
index 00000000000..eecaf87cb88
--- /dev/null
+++ b/vendor/github.com/containers/psgo/internal/cgroups/cgroups.go
@@ -0,0 +1,44 @@
+// Copyright 2019 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cgroups
+
+import (
+    "sync"
+    "syscall"
+)
+
+const (
+    CgroupRoot        = "/sys/fs/cgroup"
+    cgroup2SuperMagic = 0x63677270
+)
+
+var (
+    isUnifiedOnce sync.Once
+    isUnified     bool
+    isUnifiedErr  error
+)
+
+// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode.
+func IsCgroup2UnifiedMode() (bool, error) {
+    isUnifiedOnce.Do(func() {
+        var st syscall.Statfs_t
+        if err := syscall.Statfs(CgroupRoot, &st); err != nil {
+            isUnified, isUnifiedErr = false, err
+        } else {
+            isUnified, isUnifiedErr = st.Type == cgroup2SuperMagic, nil
+        }
+    })
+    return isUnified, isUnifiedErr
+}
diff --git a/vendor/github.com/containers/psgo/internal/dev/tty.go b/vendor/github.com/containers/psgo/internal/dev/tty.go
new file mode 100644
index 00000000000..863767f7529
--- /dev/null
+++ b/vendor/github.com/containers/psgo/internal/dev/tty.go
@@ -0,0 +1,125 @@
+// Copyright 2018 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dev
+
+import (
+    "os"
+    "strings"
+    "syscall"
+)
+
+// TTY represents a tty including its minor and major device number and the
+// path to the tty.
+type TTY struct {
+    // Minor device number.
+    Minor uint64
+    // Major device number.
+    Major uint64
+    // Path to the tty device.
+    Path string
+}
+
+// FindTTY returns the TTY corresponding to ttyNr, or nil if none could be found.
+func FindTTY(ttyNr uint64, devices *[]TTY) (*TTY, error) {
+    // (man 5 proc) The minor device number is contained in the combination
+    // of bits 31 to 20 and 7 to 0; the major device number is in bits 15
+    // to 8.
+    maj := (ttyNr >> 8) & 0xFF
+    min := (ttyNr & 0xFF) | ((ttyNr >> 20) & 0xFFF)
+
+    if devices == nil {
+        devs, err := TTYs()
+        if err != nil {
+            return nil, err
+        }
+        devices = devs
+    }
+
+    for _, t := range *devices {
+        if t.Minor == min && t.Major == maj {
+            return &t, nil
+        }
+    }
+
+    return nil, nil
+}
+
+// majDevNum returns the major device number of rdev (see stat_t.Rdev).
+func majDevNum(rdev uint64) uint64 {
+    return (rdev >> 8) & 0xfff
+}
+
+// minDevNum returns the minor device number of rdev (see stat_t.Rdev).
+func minDevNum(rdev uint64) uint64 {
+    return (rdev & 0xff) | ((rdev >> 12) & 0xfff00)
+}
+
+// TTYs parses /dev for tty and pts devices.
+func TTYs() (*[]TTY, error) {
+    devDir, err := os.Open("/dev/")
+    if err != nil {
+        return nil, err
+    }
+    defer devDir.Close()
+
+    devices := []string{}
+    devTTYs, err := devDir.Readdirnames(0)
+    if err != nil {
+        return nil, err
+    }
+    for _, d := range devTTYs {
+        if !strings.HasPrefix(d, "tty") {
+            continue
+        }
+        devices = append(devices, "/dev/"+d)
+    }
+
+    devPTSDir, err := os.Open("/dev/pts/")
+    if err != nil {
+        return nil, err
+    }
+    defer devPTSDir.Close()
+
+    devPTSs, err := devPTSDir.Readdirnames(0)
+    if err != nil {
+        return nil, err
+    }
+    for _, d := range devPTSs {
+        devices = append(devices, "/dev/pts/"+d)
+    }
+
+    ttys := []TTY{}
+    for _, dev := range devices {
+        fi, err := os.Stat(dev)
+        if err != nil {
+            if os.IsNotExist(err) {
+                // catch race conditions
+                continue
+            }
+            return nil, err
+        }
+        s := fi.Sys().(*syscall.Stat_t)
+        t := TTY{
+            // Rdev is type uint32 on mips arch so we have to cast to uint64
+            Minor: minDevNum(uint64(s.Rdev)),
+            Major: majDevNum(uint64(s.Rdev)),
+            Path:  dev,
+        }
+        ttys = append(ttys, t)
+    }
+
+    return &ttys, nil
+}
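A worked example of the bit layout that FindTTY decodes from the tty_nr field of /proc/&lt;pid&gt;/stat (editorial sketch; the sample value is hypothetical):

```go
package main

import "fmt"

func main() {
	// man 5 proc: minor = bits 31..20 and 7..0; major = bits 15..8.
	ttyNr := uint64(34817) // 0x8801, as read from /proc/<pid>/stat
	maj := (ttyNr >> 8) & 0xFF
	min := (ttyNr & 0xFF) | ((ttyNr >> 20) & 0xFFF)
	fmt.Printf("%d:%d\n", maj, min) // 136:1, i.e. /dev/pts/1
}
```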
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package host extracts data from the host, such as the system's boot time or
+// the tick rate of the system clock.
+package host
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// BootTime parses /proc/stat and returns the boot time in seconds since the
+// Epoch, 1970-01-01 00:00:00 +0000 (UTC).
+func BootTime() (int64, error) {
+	if bootTime != nil {
+		return *bootTime, nil
+	}
+
+	f, err := os.Open("/proc/stat")
+	if err != nil {
+		return 0, err
+	}
+	defer f.Close()
+
+	btimeStr := ""
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		if len(fields) < 2 {
+			continue
+		}
+		if fields[0] == "btime" {
+			btimeStr = fields[1]
+		}
+	}
+
+	if len(btimeStr) == 0 {
+		return 0, fmt.Errorf("couldn't extract boot time from /proc/stat")
+	}
+
+	btimeSec, err := strconv.ParseInt(btimeStr, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("error parsing boot time from /proc/stat: %w", err)
+	}
+	bootTime = &btimeSec
+	return btimeSec, nil
+}
diff --git a/vendor/github.com/containers/psgo/internal/host/host_cgo.go b/vendor/github.com/containers/psgo/internal/host/host_cgo.go
new file mode 100644
index 00000000000..eac9fe5ce78
--- /dev/null
+++ b/vendor/github.com/containers/psgo/internal/host/host_cgo.go
@@ -0,0 +1,37 @@
+// Copyright 2018 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package host extracts data from the host, such as the system's boot time or
+// the tick rate of the system clock.
+package host
+
+/*
+#include <unistd.h>
+*/
+import "C"
+
+var (
+	// cache host queries to avoid redundant calculations
+	clockTicks *int64
+	bootTime   *int64
+)
+
+// ClockTicks returns sysconf(SC_CLK_TCK).
+func ClockTicks() (int64, error) {
+	if clockTicks == nil {
+		ticks := int64(C.sysconf(C._SC_CLK_TCK))
+		clockTicks = &ticks
+	}
+	return *clockTicks, nil
+}
diff --git a/vendor/github.com/containers/psgo/internal/host/host_nocgo.go b/vendor/github.com/containers/psgo/internal/host/host_nocgo.go
new file mode 100644
index 00000000000..6ff33741517
--- /dev/null
+++ b/vendor/github.com/containers/psgo/internal/host/host_nocgo.go
@@ -0,0 +1,84 @@
+// +build !cgo
+
+// Copyright 2018 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package host extracts data from the host, such as the system's boot time or
+// the tick rate of the system clock.
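+//
+// This is the cgo-free variant: without cgo, sysconf(_SC_CLK_TCK) is not
+// available, so the clock tick rate is read from the AT_CLKTCK entry of
+// /proc/self/auxv instead.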
+package host
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io/ioutil"
+	"unsafe"
+)
+
+var (
+	// cache host queries to avoid redundant calculations
+	clockTicks *int64
+	bootTime   *int64
+)
+
+func getNativeEndianness() binary.ByteOrder {
+	var i int32 = 0x00000001
+	u := unsafe.Pointer(&i)
+	if *((*byte)(u)) == 0x01 {
+		return binary.LittleEndian
+	}
+	return binary.BigEndian
+}
+
+const (
+	atClktck = 17
+)
+
+func getFromAuxv(what uint, whatName string) (uint, error) {
+	dataLen := int(unsafe.Sizeof(int(0)))
+	p, err := ioutil.ReadFile("/proc/self/auxv")
+	if err != nil {
+		return 0, err
+	}
+	native := getNativeEndianness()
+	for i := 0; i < len(p); {
+		var k, v uint
+
+		switch dataLen {
+		case 4:
+			k = uint(native.Uint32(p[i : i+dataLen]))
+			v = uint(native.Uint32(p[i+dataLen : i+dataLen*2]))
+		case 8:
+			k = uint(native.Uint64(p[i : i+dataLen]))
+			v = uint(native.Uint64(p[i+dataLen : i+dataLen*2]))
+		}
+		i += dataLen * 2
+		if k == what {
+			return v, nil
+		}
+	}
+	return 0, fmt.Errorf("cannot find %s in auxv", whatName)
+}
+
+// ClockTicks returns sysconf(SC_CLK_TCK).
+func ClockTicks() (int64, error) {
+	if clockTicks == nil {
+		ret, err := getFromAuxv(atClktck, "AT_CLKTCK")
+		if err != nil {
+			return -1, err
+		}
+		ticks := int64(ret)
+		clockTicks = &ticks
+	}
+	return *clockTicks, nil
+}
diff --git a/vendor/github.com/containers/psgo/internal/proc/attr.go b/vendor/github.com/containers/psgo/internal/proc/attr.go
new file mode 100644
index 00000000000..9ef694bdccd
--- /dev/null
+++ b/vendor/github.com/containers/psgo/internal/proc/attr.go
@@ -0,0 +1,38 @@
+// Copyright 2018 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proc
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+)
+
+// ParseAttrCurrent returns the contents of /proc/$pid/attr/current, or "?" if
+// labeling is not supported on the host.
+func ParseAttrCurrent(pid string) (string, error) {
+	data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%s/attr/current", pid))
+	if err != nil {
+		_, err = os.Stat(fmt.Sprintf("/proc/%s", pid))
+		if os.IsNotExist(err) {
+			// PID doesn't exist
+			return "", err
+		}
+		// PID exists but labeling seems to be unsupported
+		return "?", nil
+	}
+	return strings.Trim(string(data), "\n"), nil
+}
diff --git a/vendor/github.com/containers/psgo/internal/proc/cmdline.go b/vendor/github.com/containers/psgo/internal/proc/cmdline.go
new file mode 100644
index 00000000000..66914954373
--- /dev/null
+++ b/vendor/github.com/containers/psgo/internal/proc/cmdline.go
@@ -0,0 +1,36 @@
+// Copyright 2018 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bytes" + "fmt" + "io/ioutil" +) + +// ParseCmdLine parses a /proc/$pid/cmdline file and returns a string slice. +func ParseCmdLine(pid string) ([]string, error) { + data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%s/cmdline", pid)) + if err != nil { + return nil, err + } + + cmdLine := []string{} + for _, rawCmd := range bytes.Split(data, []byte{0}) { + cmdLine = append(cmdLine, string(rawCmd)) + } + + return cmdLine, nil +} diff --git a/vendor/github.com/containers/psgo/internal/proc/ns.go b/vendor/github.com/containers/psgo/internal/proc/ns.go new file mode 100644 index 00000000000..9e77b865b39 --- /dev/null +++ b/vendor/github.com/containers/psgo/internal/proc/ns.go @@ -0,0 +1,73 @@ +// Copyright 2018 psgo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bufio" + "fmt" + "io" + "os" + + "github.com/containers/storage/pkg/idtools" +) + +// ParsePIDNamespace returns the content of /proc/$pid/ns/pid. +func ParsePIDNamespace(pid string) (string, error) { + pidNS, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/pid", pid)) + if err != nil { + return "", err + } + return pidNS, nil +} + +// ParseUserNamespace returns the content of /proc/$pid/ns/user. +func ParseUserNamespace(pid string) (string, error) { + userNS, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/user", pid)) + if err != nil { + return "", err + } + return userNS, nil +} + +// ReadMappings reads the user namespace mappings at the specified path +func ReadMappings(path string) ([]idtools.IDMap, error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + var mappings []idtools.IDMap + + buf := bufio.NewReader(file) + for { + line, _, err := buf.ReadLine() + if err != nil { + if err == io.EOF { //nolint:errorlint // False positive, see https://github.com/polyfloyd/go-errorlint/pull/12 + return mappings, nil + } + return nil, fmt.Errorf("cannot read line from %s: %w", path, err) + } + if line == nil { + return mappings, nil + } + + var containerID, hostID, size int + if _, err := fmt.Sscanf(string(line), "%d %d %d", &containerID, &hostID, &size); err != nil { + return nil, fmt.Errorf("cannot parse %s: %w", string(line), err) + } + mappings = append(mappings, idtools.IDMap{ContainerID: containerID, HostID: hostID, Size: size}) + } +} diff --git a/vendor/github.com/containers/psgo/internal/proc/pids.go b/vendor/github.com/containers/psgo/internal/proc/pids.go new file mode 100644 index 00000000000..2687396e155 --- /dev/null +++ b/vendor/github.com/containers/psgo/internal/proc/pids.go @@ -0,0 +1,164 @@ +// Copyright 2018-2019 psgo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proc
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/containers/psgo/internal/cgroups"
+)
+
+// GetPIDs extracts and returns all PIDs from /proc.
+func GetPIDs() ([]string, error) {
+	procDir, err := os.Open("/proc/")
+	if err != nil {
+		return nil, err
+	}
+	defer procDir.Close()
+
+	// extract string slice of all directories in procDir
+	pidDirs, err := procDir.Readdirnames(0)
+	if err != nil {
+		return nil, err
+	}
+
+	pids := []string{}
+	for _, pidDir := range pidDirs {
+		_, err := strconv.Atoi(pidDir)
+		if err != nil {
+			// skip non-numerical entries (e.g., `/proc/softirqs`)
+			continue
+		}
+		pids = append(pids, pidDir)
+	}
+
+	return pids, nil
+}
+
+// GetPIDsFromCgroup returns a string slice of all pids listed in pid's pids
+// cgroup. It automatically detects if we're running in unified mode or not.
+func GetPIDsFromCgroup(pid string) ([]string, error) {
+	unified, err := cgroups.IsCgroup2UnifiedMode()
+	if err != nil {
+		return nil, err
+	}
+	if unified {
+		return getPIDsFromCgroupV2(pid)
+	}
+	return getPIDsFromCgroupV1(pid)
+}
+
+// getPIDsFromCgroupV1 returns a string slice of all pids listed in pid's pids
+// cgroup.
+func getPIDsFromCgroupV1(pid string) ([]string, error) {
+	// First, find the corresponding path to the PID cgroup.
+	pidPath := fmt.Sprintf("/proc/%s/cgroup", pid)
+	f, err := os.Open(pidPath)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+	cgroupPath := ""
+	for scanner.Scan() {
+		fields := strings.Split(scanner.Text(), ":")
+		if len(fields) != 3 {
+			continue
+		}
+		if fields[1] == "pids" {
+			cgroupPath = filepath.Join(cgroups.CgroupRoot, "pids", fields[2], "cgroup.procs")
+			break
+		}
+	}
+
+	if cgroupPath == "" {
+		return nil, fmt.Errorf("couldn't find v1 pids group for PID %s", pid)
+	}
+
+	// Second, extract the PIDs inside the cgroup.
+	f, err = os.Open(cgroupPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			// OCI runtimes might mount the container cgroup at the
+			// root, so the path recorded in /proc/$PID/cgroup may no
+			// longer exist. Check if the PID still exists to make sure
+			// the process is still alive.
+			if _, errStat := os.Stat(pidPath); errStat == nil {
+				cgroupPath = filepath.Join(cgroups.CgroupRoot, "pids", "cgroup.procs")
+				f, err = os.Open(cgroupPath)
+			}
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	defer f.Close()
+
+	pids := []string{}
+	scanner = bufio.NewScanner(f)
+	for scanner.Scan() {
+		pids = append(pids, scanner.Text())
+	}
+
+	return pids, nil
+}
+
+// getPIDsFromCgroupV2 returns a string slice of all pids listed in pid's pids
+// cgroup.
+func getPIDsFromCgroupV2(pid string) ([]string, error) {
+	// First, find the corresponding path to the PID cgroup.
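+	// In the unified (v2) hierarchy, /proc/$pid/cgroup contains a single
+	// entry of the form "0::/path", i.e. its controller-list field is
+	// empty; that is what the loop below matches on.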
+	f, err := os.Open(fmt.Sprintf("/proc/%s/cgroup", pid))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+	cgroupSlice := ""
+	for scanner.Scan() {
+		fields := strings.Split(scanner.Text(), ":")
+		if len(fields) != 3 {
+			continue
+		}
+		if fields[1] == "" {
+			cgroupSlice = fields[2]
+			break
+		}
+	}
+
+	if cgroupSlice == "" {
+		return nil, fmt.Errorf("couldn't find v2 pids group for PID %s", pid)
+	}
+
+	// Second, extract the PIDs inside the cgroup.
+	f, err = os.Open(filepath.Join(cgroups.CgroupRoot, cgroupSlice, "cgroup.procs"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	pids := []string{}
+	scanner = bufio.NewScanner(f)
+	for scanner.Scan() {
+		pids = append(pids, scanner.Text())
+	}
+
+	return pids, nil
+}
diff --git a/vendor/github.com/containers/psgo/internal/proc/stat.go b/vendor/github.com/containers/psgo/internal/proc/stat.go
new file mode 100644
index 00000000000..e3286704cc4
--- /dev/null
+++ b/vendor/github.com/containers/psgo/internal/proc/stat.go
@@ -0,0 +1,170 @@
+// Copyright 2018 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proc
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"strings"
+)
+
+// Stat is a direct translation of a `/proc/[pid]/stat` file as described in
+// the proc(5) manpage. Please note that it is not a full translation as not
+// all fields are in the scope of this library and higher indices are
+// Kernel-version dependent.
+type Stat struct {
+	// (1) The process ID
+	Pid string
+	// (2) The filename of the executable, in parentheses. This is visible
+	// whether or not the executable is swapped out.
+	Comm string
+	// (3) The process state (e.g., running, sleeping, zombie, dead).
+	// Refer to proc(5) for further details.
+	State string
+	// (4) The PID of the parent of this process.
+	Ppid string
+	// (5) The process group ID of the process.
+	Pgrp string
+	// (6) The session ID of the process.
+	Session string
+	// (7) The controlling terminal of the process. (The minor device
+	// number is contained in the combination of bits 31 to 20 and 7 to 0;
+	// the major device number is in bits 15 to 8.)
+	TtyNr string
+	// (8) The ID of the foreground process group of the controlling
+	// terminal of the process.
+	Tpgid string
+	// (9) The kernel flags word of the process. For bit meanings, see the
+	// PF_* defines in the Linux kernel source file
+	// include/linux/sched.h. Details depend on the kernel version.
+	Flags string
+	// (10) The number of minor faults the process has made which have not
+	// required loading a memory page from disk.
+	Minflt string
+	// (11) The number of minor faults that the process's waited-for
+	// children have made.
+	Cminflt string
+	// (12) The number of major faults the process has made which have
+	// required loading a memory page from disk.
+	Majflt string
+	// (13) The number of major faults that the process's waited-for
+	// children have made.
+	Cmajflt string
+	// (14) Amount of time that this process has been scheduled in user
+	// mode, measured in clock ticks (divide by
+	// sysconf(_SC_CLK_TCK)). This includes guest time, guest_time
+	// (time spent running a virtual CPU, see below), so that applications
+	// that are not aware of the guest time field do not lose that time
+	// from their calculations.
+	Utime string
+	// (15) Amount of time that this process has been scheduled in kernel
+	// mode, measured in clock ticks (divide by sysconf(_SC_CLK_TCK)).
+	Stime string
+	// (16) Amount of time that this process's waited-for children have
+	// been scheduled in user mode, measured in clock ticks (divide by
+	// sysconf(_SC_CLK_TCK)). (See also times(2).) This includes guest
+	// time, cguest_time (time spent running a virtual CPU, see below).
+	Cutime string
+	// (17) Amount of time that this process's waited-for children have
+	// been scheduled in kernel mode, measured in clock ticks (divide by
+	// sysconf(_SC_CLK_TCK)).
+	Cstime string
+	// (18) (Explanation for Linux 2.6+) For processes running a real-time
+	// scheduling policy (policy below; see sched_setscheduler(2)), this is
+	// the negated scheduling priority, minus one; that is, a number
+	// in the range -2 to -100, corresponding to real-time priorities 1 to
+	// 99. For processes running under a non-real-time scheduling
+	// policy, this is the raw nice value (setpriority(2)) as represented
+	// in the kernel. The kernel stores nice values as numbers in the
+	// range 0 (high) to 39 (low), corresponding to the user-visible nice
+	// range of -20 to 19.
+	Priority string
+	// (19) The nice value (see setpriority(2)), a value in the range 19
+	// (low priority) to -20 (high priority).
+	Nice string
+	// (20) Number of threads in this process (since Linux 2.6). Before
+	// kernel 2.6, this field was hard coded to 0 as a placeholder for an
+	// earlier removed field.
+	NumThreads string
+	// (21) The time in jiffies before the next SIGALRM is sent to the
+	// process due to an interval timer. Since kernel 2.6.17, this
+	// field is no longer maintained, and is hard coded as 0.
+	Itrealvalue string
+	// (22) The time the process started after system boot. In kernels
+	// before Linux 2.6, this value was expressed in jiffies. Since
+	// Linux 2.6, the value is expressed in clock ticks (divide by
+	// sysconf(_SC_CLK_TCK)).
+	Starttime string
+	// (23) Virtual memory size in bytes.
+	Vsize string
+}
+
+// readStat is used for mocking in unit tests.
+var readStat = func(path string) (string, error) {
+	rawData, err := ioutil.ReadFile(path)
+	if err != nil {
+		return "", err
+	}
+	return string(rawData), nil
+}
+
+// ParseStat parses the /proc/$pid/stat file and returns a Stat.
+func ParseStat(pid string) (*Stat, error) {
+	data, err := readStat(fmt.Sprintf("/proc/%s/stat", pid))
+	if err != nil {
+		return nil, err
+	}
+
+	firstParen := strings.IndexByte(data, '(')
+	lastParen := strings.LastIndexByte(data, ')')
+	if firstParen == -1 || lastParen == -1 {
+		return nil, errors.New("invalid format in stat")
+	}
+	pidstr := data[0 : firstParen-1]
+	comm := data[firstParen+1 : lastParen]
+	rest := strings.Fields(data[lastParen+1:])
+	fields := append([]string{pidstr, comm}, rest...)
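+	// comm (field 2) is the only stat field that may itself contain
+	// spaces and parentheses, which is why it was extracted above via the
+	// first '(' and the last ')'; everything after the closing parenthesis
+	// is safely whitespace-separated and can be indexed by its proc(5)
+	// field number.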
+
+	fieldAt := func(i int) string {
+		return fields[i-1]
+	}
+
+	return &Stat{
+		Pid:         fieldAt(1),
+		Comm:        fieldAt(2),
+		State:       fieldAt(3),
+		Ppid:        fieldAt(4),
+		Pgrp:        fieldAt(5),
+		Session:     fieldAt(6),
+		TtyNr:       fieldAt(7),
+		Tpgid:       fieldAt(8),
+		Flags:       fieldAt(9),
+		Minflt:      fieldAt(10),
+		Cminflt:     fieldAt(11),
+		Majflt:      fieldAt(12),
+		Cmajflt:     fieldAt(13),
+		Utime:       fieldAt(14),
+		Stime:       fieldAt(15),
+		Cutime:      fieldAt(16),
+		Cstime:      fieldAt(17),
+		Priority:    fieldAt(18),
+		Nice:        fieldAt(19),
+		NumThreads:  fieldAt(20),
+		Itrealvalue: fieldAt(21),
+		Starttime:   fieldAt(22),
+		Vsize:       fieldAt(23),
+	}, nil
+}
diff --git a/vendor/github.com/containers/psgo/internal/proc/status.go b/vendor/github.com/containers/psgo/internal/proc/status.go
new file mode 100644
index 00000000000..1d2247cbd0c
--- /dev/null
+++ b/vendor/github.com/containers/psgo/internal/proc/status.go
@@ -0,0 +1,398 @@
+// Copyright 2018 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proc
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/containers/storage/pkg/idtools"
+)
+
+// Status is a direct translation of a `/proc/[pid]/status` file, which
+// provides much of the information in /proc/[pid]/stat and /proc/[pid]/statm
+// in a format that's easier for humans to parse.
+type Status struct {
+	// Name: Command run by this process.
+	Name string
+	// Umask: Process umask, expressed in octal with a leading zero; see
+	// umask(2). (Since Linux 4.7.)
+	Umask string
+	// State: Current state of the process. One of "R (running)", "S
+	// (sleeping)", "D (disk sleep)", "T (stopped)", "T (tracing stop)", "Z
+	// (zombie)", or "X (dead)".
+	State string
+	// Tgid: Thread group ID (i.e., Process ID).
+	Tgid string
+	// Ngid: NUMA group ID (0 if none; since Linux 3.13).
+	Ngid string
+	// Pid: Thread ID (see gettid(2)).
+	Pid string
+	// PPid: PID of parent process.
+	PPid string
+	// TracerPid: PID of process tracing this process (0 if not being traced).
+	TracerPid string
+	// Uids: Real, effective, saved set, and filesystem.
+	Uids []string
+	// Gids: Real, effective, saved set, and filesystem.
+	Gids []string
+	// FDSize: Number of file descriptor slots currently allocated.
+	FdSize string
+	// Groups: Supplementary group list.
+	Groups []string
+	// NStgid: Thread group ID (i.e., PID) in each of the PID namespaces
+	// of which [pid] is a member. The leftmost entry shows the value
+	// with respect to the PID namespace of the reading process, followed
+	// by the value in successively nested inner namespaces. (Since Linux
+	// 4.1.)
+	NStgid string
+	// NSpid: Thread ID in each of the PID namespaces of which [pid] is a
+	// member. The fields are ordered as for NStgid. (Since Linux 4.1.)
+	NSpid []string
+	// NSpgid: Process group ID in each of the PID namespaces of which
+	// [pid] is a member. The fields are ordered as for NStgid. (Since
+	// Linux 4.1.)
+	NSpgid string
+	// NSsid: Session ID in each of the PID namespaces of which [pid] is a
+	// member. The fields are ordered as for NStgid. (Since Linux 4.1.)
+	NSsid string
+	// VMPeak: Peak virtual memory size.
+	VMPeak string
+	// VMSize: Virtual memory size.
+	VMSize string
+	// VMLck: Locked memory size (see mlock(3)).
+	VMLCK string
+	// VMPin: Pinned memory size (since Linux 3.2). These are pages
+	// that can't be moved because something needs to directly access
+	// physical memory.
+	VMPin string
+	// VMHWM: Peak resident set size ("high water mark").
+	VMHWM string
+	// VMRSS: Resident set size. Note that the value here is the sum of
+	// RssAnon, RssFile, and RssShmem.
+	VMRSS string
+	// RssAnon: Size of resident anonymous memory. (since Linux 4.5).
+	RssAnon string
+	// RssFile: Size of resident file mappings. (since Linux 4.5).
+	RssFile string
+	// RssShmem: Size of resident shared memory (includes System V
+	// shared memory, mappings from tmpfs(5), and shared anonymous
+	// mappings). (since Linux 4.5).
+	RssShmem string
+	// VMData: Size of data segment.
+	VMData string
+	// VMStk: Size of stack segment.
+	VMStk string
+	// VMExe: Size of text segment.
+	VMExe string
+	// VMLib: Shared library code size.
+	VMLib string
+	// VMPTE: Page table entries size (since Linux 2.6.10).
+	VMPTE string
+	// VMPMD: Size of second-level page tables (since Linux 4.0).
+	VMPMD string
+	// VMSwap: Swapped-out virtual memory size by anonymous private pages;
+	// shmem swap usage is not included (since Linux 2.6.34).
+	VMSwap string
+	// HugetlbPages: Size of hugetlb memory portions. (since Linux 4.4).
+	HugetlbPages string
+	// Threads: Number of threads in process containing this thread.
+	Threads string
+	// SigQ: This field contains two slash-separated numbers that relate to
+	// queued signals for the real user ID of this process. The first of
+	// these is the number of currently queued signals for this real
+	// user ID, and the second is the resource limit on the number of
+	// queued signals for this process (see the description of
+	// RLIMIT_SIGPENDING in getrlimit(2)).
+	SigQ string
+	// SigPnd: Number of signals pending for the thread (see pthreads(7)).
+	SigPnd string
+	// ShdPnd: Number of signals pending for process as a whole (see
+	// signal(7)).
+	ShdPnd string
+	// SigBlk: Mask indicating signals being blocked (see signal(7)).
+	SigBlk string
+	// SigIgn: Mask indicating signals being ignored (see signal(7)).
+	SigIgn string
+	// SigCgt: Mask indicating signals being caught (see signal(7)).
+	SigCgt string
+	// CapInh: Mask of capabilities enabled in inheritable sets (see
+	// capabilities(7)).
+	CapInh string
+	// CapPrm: Mask of capabilities enabled in permitted sets (see
+	// capabilities(7)).
+	CapPrm string
+	// CapEff: Mask of capabilities enabled in effective sets (see
+	// capabilities(7)).
+	CapEff string
+	// CapBnd: Capability Bounding set (since Linux 2.6.26, see
+	// capabilities(7)).
+	CapBnd string
+	// CapAmb: Ambient capability set (since Linux 4.3, see capabilities(7)).
+	CapAmb string
+	// NoNewPrivs: Value of the no_new_privs bit (since Linux 4.10, see
+	// prctl(2)).
+	NoNewPrivs string
+	// Seccomp: Seccomp mode of the process (since Linux 3.8, see
+	// seccomp(2)). 0 means SECCOMP_MODE_DISABLED; 1 means
+	// SECCOMP_MODE_STRICT; 2 means SECCOMP_MODE_FILTER. This field is
+	// provided only if the kernel was built with the CONFIG_SECCOMP kernel
+	// configuration option enabled.
+	Seccomp string
+	// Cpus_allowed: Mask of CPUs on which this process may run
+	// (since Linux 2.6.24, see cpuset(7)).
+	CpusAllowed string
+	// Cpus_allowed_list: Same as previous, but in "list format" (since
+	// Linux 2.6.26, see cpuset(7)).
+	CpusAllowedList string
+	// Mems_allowed: Mask of memory nodes allowed to this process
+	// (since Linux 2.6.24, see cpuset(7)).
+	MemsAllowed string
+	// Mems_allowed_list: Same as previous, but in "list format" (since
+	// Linux 2.6.26, see cpuset(7)).
+	MemsAllowedList string
+	// VoluntaryCtxtSwitches: Number of voluntary context switches
+	// (since Linux 2.6.23).
+	VoluntaryCtxtSwitches string
+	// NonvoluntaryCtxtSwitches: Number of involuntary context switches
+	// (since Linux 2.6.23).
+	NonvoluntaryCtxtSwitches string
+}
+
+// readStatus returns the content of /proc/pid/status as a string slice.
+func readStatus(pid string) ([]string, error) {
+	path := fmt.Sprintf("/proc/%s/status", pid)
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	lines := []string{}
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		lines = append(lines, scanner.Text())
+	}
+	return lines, nil
+}
+
+// mapStatusField maps a single string-typed ID field given the set of
+// mappings. If no mapping exists, the overflow uid/gid is used.
+func mapStatusField(field *string, mapping []idtools.IDMap, overflow string) {
+	hostId, err := strconv.Atoi(*field)
+	if err != nil {
+		*field = overflow
+		return
+	}
+	contId, err := idtools.RawToContainer(hostId, mapping)
+	if err != nil {
+		*field = overflow
+		return
+	}
+	*field = strconv.Itoa(contId)
+}
+
+var (
+	overflowOnce sync.Once
+	overflowUid  = "65534"
+	overflowGid  = "65534"
+)
+
+func overflowIds() (string, string) {
+	overflowOnce.Do(func() {
+		if uid, err := os.ReadFile("/proc/sys/kernel/overflowuid"); err == nil {
+			overflowUid = strings.TrimSpace(string(uid))
+		}
+		if gid, err := os.ReadFile("/proc/sys/kernel/overflowgid"); err == nil {
+			overflowGid = strings.TrimSpace(string(gid))
+		}
+	})
+	return overflowUid, overflowGid
+}
+
+// mapStatus takes a Status struct and remaps all of the relevant fields to
+// match the user namespace of the target process.
+func mapStatus(pid string, status *Status) (*Status, error) {
+	uidMap, err := ReadMappings(fmt.Sprintf("/proc/%s/uid_map", pid))
+	if err != nil {
+		return nil, err
+	}
+	gidMap, err := ReadMappings(fmt.Sprintf("/proc/%s/gid_map", pid))
+	if err != nil {
+		return nil, err
+	}
+	overflowUid, overflowGid := overflowIds()
+	for i := range status.Uids {
+		mapStatusField(&status.Uids[i], uidMap, overflowUid)
+	}
+	for i := range status.Gids {
+		mapStatusField(&status.Gids[i], gidMap, overflowGid)
+	}
+	for i := range status.Groups {
+		mapStatusField(&status.Groups[i], gidMap, overflowGid)
+	}
+	return status, nil
+}
+
+// ParseStatus parses the /proc/$pid/status file and returns a *Status.
+func ParseStatus(pid string, mapUserNS bool) (*Status, error) {
+	lines, err := readStatus(pid)
+	if err != nil {
+		return nil, err
+	}
+	status, err := parseStatus(pid, lines)
+	if err != nil {
+		return nil, err
+	}
+	if mapUserNS {
+		status, err = mapStatus(pid, status)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return status, nil
+}
+
+// parseStatus extracts data from lines and returns a *Status.
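+// Unknown keys are ignored, which keeps the parser forward compatible with
+// status fields introduced by newer kernels.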
+func parseStatus(pid string, lines []string) (*Status, error) {
+	s := Status{}
+	errUnexpectedInput := fmt.Errorf("unexpected input from /proc/%s/status", pid)
+	for _, line := range lines {
+		fields := strings.Fields(line)
+		if len(fields) < 2 {
+			continue
+		}
+
+		switch fields[0] {
+		case "Name:":
+			s.Name = fields[1]
+		case "Umask:":
+			s.Umask = fields[1]
+		case "State:":
+			s.State = fields[1]
+		case "Tgid:":
+			s.Tgid = fields[1]
+		case "Ngid:":
+			s.Ngid = fields[1]
+		case "Pid:":
+			s.Pid = fields[1]
+		case "PPid:":
+			s.PPid = fields[1]
+		case "TracerPid:":
+			s.TracerPid = fields[1]
+		case "Uid:":
+			if len(fields) != 5 {
+				return nil, fmt.Errorf("%s: %w", line, errUnexpectedInput)
+			}
+			s.Uids = []string{fields[1], fields[2], fields[3], fields[4]}
+		case "Gid:":
+			if len(fields) != 5 {
+				return nil, fmt.Errorf("%s: %w", line, errUnexpectedInput)
+			}
+			s.Gids = []string{fields[1], fields[2], fields[3], fields[4]}
+		case "FDSize:":
+			s.FdSize = fields[1]
+		case "Groups:":
+			s.Groups = fields[1:]
+		case "NStgid:":
+			s.NStgid = fields[1]
+		case "NSpid:":
+			s.NSpid = fields[1:]
+		case "NSpgid:":
+			s.NSpgid = fields[1]
+		case "NSsid:":
+			s.NSsid = fields[1]
+		case "VmPeak:":
+			s.VMPeak = fields[1]
+		case "VmSize:":
+			s.VMSize = fields[1]
+		case "VmLck:":
+			s.VMLCK = fields[1]
+		case "VmPin:":
+			s.VMPin = fields[1]
+		case "VmHWM:":
+			s.VMHWM = fields[1]
+		case "VmRSS:":
+			s.VMRSS = fields[1]
+		case "RssAnon:":
+			s.RssAnon = fields[1]
+		case "RssFile:":
+			s.RssFile = fields[1]
+		case "RssShmem:":
+			s.RssShmem = fields[1]
+		case "VmData:":
+			s.VMData = fields[1]
+		case "VmStk:":
+			s.VMStk = fields[1]
+		case "VmExe:":
+			s.VMExe = fields[1]
+		case "VmLib:":
+			s.VMLib = fields[1]
+		case "VmPTE:":
+			s.VMPTE = fields[1]
+		case "VmPMD:":
+			s.VMPMD = fields[1]
+		case "VmSwap:":
+			s.VMSwap = fields[1]
+		case "HugetlbPages:":
+			s.HugetlbPages = fields[1]
+		case "Threads:":
+			s.Threads = fields[1]
+		case "SigQ:":
+			s.SigQ = fields[1]
+		case "SigPnd:":
+			s.SigPnd = fields[1]
+		case "ShdPnd:":
+			s.ShdPnd = fields[1]
+		case "SigBlk:":
+			s.SigBlk = fields[1]
+		case "SigIgn:":
+			s.SigIgn = fields[1]
+		case "SigCgt:":
+			s.SigCgt = fields[1]
+		case "CapInh:":
+			s.CapInh = fields[1]
+		case "CapPrm:":
+			s.CapPrm = fields[1]
+		case "CapEff:":
+			s.CapEff = fields[1]
+		case "CapBnd:":
+			s.CapBnd = fields[1]
+		case "CapAmb:":
+			s.CapAmb = fields[1]
+		case "NoNewPrivs:":
+			s.NoNewPrivs = fields[1]
+		case "Seccomp:":
+			s.Seccomp = fields[1]
+		case "Cpus_allowed:":
+			s.CpusAllowed = fields[1]
+		case "Cpus_allowed_list:":
+			s.CpusAllowedList = fields[1]
+		case "Mems_allowed:":
+			s.MemsAllowed = fields[1]
+		case "Mems_allowed_list:":
+			s.MemsAllowedList = fields[1]
+		case "voluntary_ctxt_switches:":
+			s.VoluntaryCtxtSwitches = fields[1]
+		case "nonvoluntary_ctxt_switches:":
+			s.NonvoluntaryCtxtSwitches = fields[1]
+		}
+	}
+
+	return &s, nil
+}
diff --git a/vendor/github.com/containers/psgo/internal/process/process.go b/vendor/github.com/containers/psgo/internal/process/process.go
new file mode 100644
index 00000000000..71503961090
--- /dev/null
+++ b/vendor/github.com/containers/psgo/internal/process/process.go
@@ -0,0 +1,235 @@
+// Copyright 2018 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package process
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"time"
+
+	"github.com/containers/psgo/internal/host"
+	"github.com/containers/psgo/internal/proc"
+	"github.com/opencontainers/runc/libcontainer/user"
+)
+
+// Process includes process-related data from the /proc FS.
+type Process struct {
+	// PID is the process ID.
+	Pid string
+	// Stat contains data from /proc/$pid/stat.
+	Stat proc.Stat
+	// Status contains data from /proc/$pid/status.
+	Status proc.Status
+	// CmdLine contains data from /proc/$pid/cmdline.
+	CmdLine []string
+	// Label contains data from /proc/$pid/attr/current.
+	Label string
+	// PidNS contains data from /proc/$pid/ns/pid.
+	PidNS string
+	// Huser is the effective host user of a container process.
+	Huser string
+	// Hgroup is the effective host group of a container process.
+	Hgroup string
+}
+
+// LookupGID returns the textual group ID, if it can be obtained, or the
+// decimal representation otherwise.
+func LookupGID(gid string) (string, error) {
+	gidNum, err := strconv.Atoi(gid)
+	if err != nil {
+		return "", fmt.Errorf("error parsing group ID: %w", err)
+	}
+	g, err := user.LookupGid(gidNum)
+	if err != nil {
+		return gid, nil
+	}
+	return g.Name, nil
+}
+
+// LookupUID returns the textual user ID, if it can be obtained, or the
+// decimal representation otherwise.
+func LookupUID(uid string) (string, error) {
+	uidNum, err := strconv.Atoi(uid)
+	if err != nil {
+		return "", fmt.Errorf("error parsing user ID: %w", err)
+	}
+	u, err := user.LookupUid(uidNum)
+	if err != nil {
+		return uid, nil
+	}
+	return u.Name, nil
+}
+
+// New returns a new Process with the specified pid and parses the relevant
+// data from /proc and /dev.
+func New(pid string, joinUserNS bool) (*Process, error) {
+	p := Process{Pid: pid}
+
+	if err := p.parseStat(); err != nil {
+		return nil, err
+	}
+	if err := p.parseStatus(joinUserNS); err != nil {
+		return nil, err
+	}
+	if err := p.parseCmdLine(); err != nil {
+		return nil, err
+	}
+	if err := p.parsePIDNamespace(); err != nil {
+		// Ignore permission errors as those occur for some pids when
+		// the caller has limited permissions.
+		if !os.IsPermission(err) {
+			return nil, err
+		}
+	}
+	if err := p.parseLabel(); err != nil {
+		return nil, err
+	}
+
+	return &p, nil
+}
+
+// FromPIDs creates a new Process for each pid.
+func FromPIDs(pids []string, joinUserNS bool) ([]*Process, error) {
+	processes := []*Process{}
+	for _, pid := range pids {
+		p, err := New(pid, joinUserNS)
+		if err != nil {
+			if errors.Is(err, os.ErrNotExist) {
+				// proc parsing is racy
+				// Let's ignore "does not exist" errors
+				continue
+			}
+			return nil, err
+		}
+		processes = append(processes, p)
+	}
+	return processes, nil
+}
+
+// parseStat parses /proc/$pid/stat.
+func (p *Process) parseStat() error {
+	s, err := proc.ParseStat(p.Pid)
+	if err != nil {
+		return err
+	}
+	p.Stat = *s
+	return nil
+}
+
+// parseStatus parses /proc/$pid/status.
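+// When joinUserNS is set, UIDs and GIDs are translated via
+// /proc/$pid/uid_map and /proc/$pid/gid_map into the values seen inside the
+// process's user namespace, falling back to the kernel overflow IDs.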
+func (p *Process) parseStatus(joinUserNS bool) error {
+	s, err := proc.ParseStatus(p.Pid, joinUserNS)
+	if err != nil {
+		return err
+	}
+	p.Status = *s
+	return nil
+}
+
+// parseCmdLine parses /proc/$pid/cmdline.
+func (p *Process) parseCmdLine() error {
+	s, err := proc.ParseCmdLine(p.Pid)
+	if err != nil {
+		return err
+	}
+	p.CmdLine = s
+	return nil
+}
+
+// parsePIDNamespace sets the PID namespace.
+func (p *Process) parsePIDNamespace() error {
+	pidNS, err := proc.ParsePIDNamespace(p.Pid)
+	if err != nil {
+		return err
+	}
+	p.PidNS = pidNS
+	return nil
+}
+
+// parseLabel parses the security label.
+func (p *Process) parseLabel() error {
+	label, err := proc.ParseAttrCurrent(p.Pid)
+	if err != nil {
+		return err
+	}
+	p.Label = label
+	return nil
+}
+
+// SetHostData sets all host-related data fields.
+func (p *Process) SetHostData() error {
+	var err error
+
+	p.Huser, err = LookupUID(p.Status.Uids[1])
+	if err != nil {
+		return err
+	}
+
+	p.Hgroup, err = LookupGID(p.Status.Gids[1])
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ElapsedTime returns the time.Duration since process p was created.
+func (p *Process) ElapsedTime() (time.Duration, error) {
+	startTime, err := p.StartTime()
+	if err != nil {
+		return 0, err
+	}
+	return time.Since(startTime), nil
+}
+
+// StartTime returns the time.Time when process p was started.
+func (p *Process) StartTime() (time.Time, error) {
+	sinceBoot, err := strconv.ParseInt(p.Stat.Starttime, 10, 64)
+	if err != nil {
+		return time.Time{}, err
+	}
+	clockTicks, err := host.ClockTicks()
+	if err != nil {
+		return time.Time{}, err
+	}
+	bootTime, err := host.BootTime()
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	sinceBoot = sinceBoot / clockTicks
+	return time.Unix(sinceBoot+bootTime, 0), nil
+}
+
+// CPUTime returns the cumulative CPU time of process p as a time.Duration.
+func (p *Process) CPUTime() (time.Duration, error) {
+	user, err := strconv.ParseInt(p.Stat.Utime, 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	system, err := strconv.ParseInt(p.Stat.Stime, 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	clockTicks, err := host.ClockTicks()
+	if err != nil {
+		return 0, err
+	}
+	secs := (user + system) / clockTicks
+	cpu := time.Unix(secs, 0)
+	return cpu.Sub(time.Unix(0, 0)), nil
+}
diff --git a/vendor/github.com/containers/psgo/psgo.go b/vendor/github.com/containers/psgo/psgo.go
new file mode 100644
index 00000000000..d6cfcef4dc0
--- /dev/null
+++ b/vendor/github.com/containers/psgo/psgo.go
@@ -0,0 +1,906 @@
+// Copyright 2018 psgo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package psgo is a ps (1) AIX-format compatible golang library extended with
+// various descriptors useful for displaying container-related data.
+//
+// The idea behind the library is to provide an easy-to-use way of extracting
+// process-related data, just as ps (1) does. The problem when using ps (1) is
+// that the ps format strings split columns with whitespaces, making the output
+// nearly impossible to parse. It also adds some jitter as we have to fork and
+// execute ps either in the container or filter the output afterwards, further
+// limiting applicability.
+//
+// Please visit https://github.com/containers/psgo for further details about
+// supported format descriptors and to see some usage examples.
+package psgo
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/containers/psgo/internal/capabilities"
+	"github.com/containers/psgo/internal/dev"
+	"github.com/containers/psgo/internal/proc"
+	"github.com/containers/psgo/internal/process"
+	"github.com/containers/storage/pkg/idtools"
+	"golang.org/x/sys/unix"
+)
+
+// JoinNamespaceOpts specifies different options for joining the specified namespaces.
+type JoinNamespaceOpts struct {
+	// UIDMap specifies a mapping for UIDs in the container. If specified,
+	// huser will perform the reverse mapping.
+	UIDMap []idtools.IDMap
+	// GIDMap specifies a mapping for GIDs in the container. If specified,
+	// hgroup will perform the reverse mapping.
+	GIDMap []idtools.IDMap
+
+	// FillMappings specifies whether UIDMap and GIDMap must be initialized
+	// with the current user namespace.
+	FillMappings bool
+}
+
+type psContext struct {
+	// Processes in the container.
+	containersProcesses []*process.Process
+	// Processes on the host. Used to map those to the ones running in the container.
+	hostProcesses []*process.Process
+	// tty and pty devices.
+	ttys *[]dev.TTY
+	// Various options.
+	opts *JoinNamespaceOpts
+}
+
+// processFunc is used to map a given aixFormatDescriptor to a corresponding
+// function extracting the desired data from a process.
+type processFunc func(*process.Process, *psContext) (string, error)
+
+// aixFormatDescriptor as mentioned in the ps(1) manpage. A given descriptor
+// can either be specified via its code (e.g., "%C") or its normal representation
+// (e.g., "pcpu") and will be printed under its corresponding header (e.g., "%CPU").
+type aixFormatDescriptor struct {
+	// code descriptor in the short form (e.g., "%C").
+	code string
+	// normal descriptor in the long form (e.g., "pcpu").
+	normal string
+	// header of the descriptor (e.g., "%CPU").
+	header string
+	// onHost controls if data of the corresponding host processes will be
+	// extracted as well.
+	onHost bool
+	// procFn points to the corresponding function to extract the desired data.
+	procFn processFunc
+}
+
+// findID converts the specified id to the host mapping.
+func findID(idStr string, mapping []idtools.IDMap, lookupFunc func(uid string) (string, error), overflowFile string) (string, error) {
+	if len(mapping) == 0 {
+		return idStr, nil
+	}
+
+	id, err := strconv.ParseInt(idStr, 10, 0)
+	if err != nil {
+		return "", fmt.Errorf("cannot parse ID: %w", err)
+	}
+	for _, m := range mapping {
+		if int(id) >= m.ContainerID && int(id) < m.ContainerID+m.Size {
+			user := fmt.Sprintf("%d", m.HostID+(int(id)-m.ContainerID))
+
+			return lookupFunc(user)
+		}
+	}
+
+	// User not found, read the overflow
+	overflow, err := ioutil.ReadFile(overflowFile)
+	if err != nil {
+		return "", err
+	}
+	return string(overflow), nil
+}
+
+// translateDescriptors parses the descriptors and returns a corresponding
+// slice of aixFormatDescriptors. Descriptors can be specified in the normal
+// and in the code form (if supported). If the descriptors slice is empty, the
+// `DefaultDescriptors` is used.
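+//
+// For example, []string{"pid", "%U"} selects the descriptors printed under
+// the PID and USER headers.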
+func translateDescriptors(descriptors []string) ([]aixFormatDescriptor, error) { + if len(descriptors) == 0 { + descriptors = DefaultDescriptors + } + + formatDescriptors := []aixFormatDescriptor{} + for _, d := range descriptors { + d = strings.TrimSpace(d) + found := false + for _, aix := range aixFormatDescriptors { + if d == aix.code || d == aix.normal { + formatDescriptors = append(formatDescriptors, aix) + found = true + } + } + if !found { + return nil, fmt.Errorf("'%s': %w", d, ErrUnknownDescriptor) + } + } + + return formatDescriptors, nil +} + +var ( + // DefaultDescriptors is the `ps -ef` compatible default format. + DefaultDescriptors = []string{"user", "pid", "ppid", "pcpu", "etime", "tty", "time", "args"} + + // ErrUnknownDescriptor is returned when an unknown descriptor is parsed. + ErrUnknownDescriptor = errors.New("unknown descriptor") + + aixFormatDescriptors = []aixFormatDescriptor{ + { + code: "%C", + normal: "pcpu", + header: "%CPU", + procFn: processPCPU, + }, + { + code: "%G", + normal: "group", + header: "GROUP", + procFn: processGROUP, + }, + { + normal: "groups", + header: "GROUPS", + procFn: processGROUPS, + }, + { + code: "%P", + normal: "ppid", + header: "PPID", + procFn: processPPID, + }, + { + code: "%U", + normal: "user", + header: "USER", + procFn: processUSER, + }, + { + code: "%a", + normal: "args", + header: "COMMAND", + procFn: processARGS, + }, + { + code: "%c", + normal: "comm", + header: "COMMAND", + procFn: processCOMM, + }, + { + code: "%g", + normal: "rgroup", + header: "RGROUP", + procFn: processRGROUP, + }, + { + code: "%n", + normal: "nice", + header: "NI", + procFn: processNICE, + }, + { + code: "%p", + normal: "pid", + header: "PID", + procFn: processPID, + }, + { + code: "%r", + normal: "pgid", + header: "PGID", + procFn: processPGID, + }, + { + code: "%t", + normal: "etime", + header: "ELAPSED", + procFn: processETIME, + }, + { + code: "%u", + normal: "ruser", + header: "RUSER", + procFn: processRUSER, + }, + { + code: "%x", + normal: "time", + header: "TIME", + procFn: processTIME, + }, + { + code: "%y", + normal: "tty", + header: "TTY", + procFn: processTTY, + }, + { + code: "%z", + normal: "vsz", + header: "VSZ", + procFn: processVSZ, + }, + { + normal: "capamb", + header: "AMBIENT CAPS", + procFn: processCAPAMB, + }, + { + normal: "capinh", + header: "INHERITED CAPS", + procFn: processCAPINH, + }, + { + normal: "capprm", + header: "PERMITTED CAPS", + procFn: processCAPPRM, + }, + { + normal: "capeff", + header: "EFFECTIVE CAPS", + procFn: processCAPEFF, + }, + { + normal: "capbnd", + header: "BOUNDING CAPS", + procFn: processCAPBND, + }, + { + normal: "seccomp", + header: "SECCOMP", + procFn: processSECCOMP, + }, + { + normal: "label", + header: "LABEL", + procFn: processLABEL, + }, + { + normal: "hpid", + header: "HPID", + onHost: true, + procFn: processHPID, + }, + { + normal: "huser", + header: "HUSER", + onHost: true, + procFn: processHUSER, + }, + { + normal: "hgroup", + header: "HGROUP", + onHost: true, + procFn: processHGROUP, + }, + { + normal: "hgroups", + header: "HGROUPS", + onHost: true, + procFn: processHGROUPS, + }, + { + normal: "rss", + header: "RSS", + procFn: processRSS, + }, + { + normal: "state", + header: "STATE", + procFn: processState, + }, + { + normal: "stime", + header: "STIME", + procFn: processStartTime, + }, + } +) + +// ListDescriptors returns a string slice of all supported AIX format +// descriptors in the normal form. 
+func ListDescriptors() (list []string) {
+	for _, d := range aixFormatDescriptors {
+		list = append(list, d.normal)
+	}
+	sort.Strings(list)
+	return
+}
+
+// JoinNamespaceAndProcessInfo has the same semantics as ProcessInfo but joins
+// the mount namespace of the specified pid before extracting data from `/proc`.
+func JoinNamespaceAndProcessInfo(pid string, descriptors []string) ([][]string, error) {
+	return JoinNamespaceAndProcessInfoWithOptions(pid, descriptors, &JoinNamespaceOpts{})
+}
+
+func contextFromOptions(options *JoinNamespaceOpts) (*psContext, error) {
+	ctx := new(psContext)
+	ctx.opts = options
+	if ctx.opts != nil && ctx.opts.FillMappings {
+		uidMappings, err := proc.ReadMappings("/proc/self/uid_map")
+		if err != nil {
+			return nil, err
+		}
+
+		gidMappings, err := proc.ReadMappings("/proc/self/gid_map")
+		if err != nil {
+			return nil, err
+		}
+		ctx.opts.UIDMap = uidMappings
+		ctx.opts.GIDMap = gidMappings
+
+		ctx.opts.FillMappings = false
+	}
+	return ctx, nil
+}
+
+// JoinNamespaceAndProcessInfoWithOptions has the same semantics as ProcessInfo but joins
+// the mount namespace of the specified pid before extracting data from `/proc`.
+func JoinNamespaceAndProcessInfoWithOptions(pid string, descriptors []string, options *JoinNamespaceOpts) ([][]string, error) {
+	var (
+		data    [][]string
+		dataErr error
+		wg      sync.WaitGroup
+	)
+
+	aixDescriptors, err := translateDescriptors(descriptors)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, err := contextFromOptions(options)
+	if err != nil {
+		return nil, err
+	}
+
+	// extract data from host processes only on-demand / when at least one
+	// of the specified descriptors requires host data
+	for _, d := range aixDescriptors {
+		if d.onHost {
+			ctx.hostProcesses, err = hostProcesses(pid)
+			if err != nil {
+				return nil, err
+			}
+			break
+		}
+	}
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		runtime.LockOSThread()
+
+		// extract user namespaces prior to joining the mount namespace
+		currentUserNs, err := proc.ParseUserNamespace("self")
+		if err != nil {
+			dataErr = fmt.Errorf("error determining user namespace: %w", err)
+			return
+		}
+
+		pidUserNs, err := proc.ParseUserNamespace(pid)
+		if err != nil {
+			dataErr = fmt.Errorf("error determining user namespace of PID %s: %w", pid, err)
+		}
+
+		// join the mount namespace of pid
+		fd, err := os.Open(fmt.Sprintf("/proc/%s/ns/mnt", pid))
+		if err != nil {
+			dataErr = err
+			return
+		}
+		defer fd.Close()
+
+		// create a new mountns on the current thread
+		if err = unix.Unshare(unix.CLONE_NEWNS); err != nil {
+			dataErr = err
+			return
+		}
+		if err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS); err != nil {
+			dataErr = err
+			return
+		}
+
+		// extract all pids mentioned in pid's mount namespace
+		pids, err := proc.GetPIDs()
+		if err != nil {
+			dataErr = err
+			return
+		}
+
+		// join the user NS if the pid's user NS is different
+		// to the caller's user NS.
+		joinUserNS := currentUserNs != pidUserNs
+
+		ctx.containersProcesses, err = process.FromPIDs(pids, joinUserNS)
+		if err != nil {
+			dataErr = err
+			return
+		}
+
+		data, dataErr = processDescriptors(aixDescriptors, ctx)
+	}()
+	wg.Wait()
+
+	return data, dataErr
+}
+
+// JoinNamespaceAndProcessInfoByPidsWithOptions has similar semantics to
+// JoinNamespaceAndProcessInfo and avoids duplicate entries by joining a given
+// PID namespace only once.
+func JoinNamespaceAndProcessInfoByPidsWithOptions(pids []string, descriptors []string, options *JoinNamespaceOpts) ([][]string, error) {
+	// Extracting data from processes that share the same PID namespace
+	// would yield duplicate results. Avoid that by extracting data only
+	// from the first process in `pids` from a given PID namespace.
+	// `nsMap` is used for quick lookups if a given PID namespace is
+	// already covered, `pidList` is used to preserve the order which is
+	// not guaranteed by nondeterministic maps in golang.
+	nsMap := make(map[string]bool)
+	pidList := []string{}
+	for _, pid := range pids {
+		ns, err := proc.ParsePIDNamespace(pid)
+		if err != nil {
+			if errors.Is(err, os.ErrNotExist) {
+				// catch race conditions
+				continue
+			}
+			return nil, fmt.Errorf("error extracting PID namespace: %w", err)
+		}
+		if _, exists := nsMap[ns]; !exists {
+			nsMap[ns] = true
+			pidList = append(pidList, pid)
+		}
+	}
+
+	data := [][]string{}
+	for i, pid := range pidList {
+		pidData, err := JoinNamespaceAndProcessInfoWithOptions(pid, descriptors, options)
+		if errors.Is(err, os.ErrNotExist) {
+			// catch race conditions
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		if i == 0 {
+			data = append(data, pidData[0])
+		}
+		data = append(data, pidData[1:]...)
+	}
+
+	return data, nil
+}
+
+// JoinNamespaceAndProcessInfoByPids has similar semantics to
+// JoinNamespaceAndProcessInfo and avoids duplicate entries by joining a given
+// PID namespace only once.
+func JoinNamespaceAndProcessInfoByPids(pids []string, descriptors []string) ([][]string, error) {
+	return JoinNamespaceAndProcessInfoByPidsWithOptions(pids, descriptors, &JoinNamespaceOpts{})
+}
+
+// ProcessInfo returns the process information of all processes in the current
+// mount namespace. The input must be a list of supported AIX format
+// descriptors. If the input slice is empty, the `DefaultDescriptors` is used.
+// The return value is a slice of string slices, one row per process, ready
+// for column-based formatting (e.g., with the `text/tabwriter` package).
+func ProcessInfo(descriptors []string) ([][]string, error) {
+	pids, err := proc.GetPIDs()
+	if err != nil {
+		return nil, err
+	}
+
+	return ProcessInfoByPids(pids, descriptors)
+}
+
+// ProcessInfoByPids is like ProcessInfo, but the process information returned
+// is limited to a list of user specified PIDs.
+func ProcessInfoByPids(pids []string, descriptors []string) ([][]string, error) {
+	aixDescriptors, err := translateDescriptors(descriptors)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, err := contextFromOptions(nil)
+	if err != nil {
+		return nil, err
+	}
+	ctx.containersProcesses, err = process.FromPIDs(pids, false)
+	if err != nil {
+		return nil, err
+	}
+
+	return processDescriptors(aixDescriptors, ctx)
+}
+
+// hostProcesses returns the host processes that live in the cgroup of the
+// specified pid, with their host-related data fields set.
+func hostProcesses(pid string) ([]*process.Process, error) {
+	// get processes
+	pids, err := proc.GetPIDsFromCgroup(pid)
+	if err != nil {
+		return nil, err
+	}
+
+	processes, err := process.FromPIDs(pids, false)
+	if err != nil {
+		return nil, err
+	}
+
+	// set the additional host data
+	for _, p := range processes {
+		if err := p.SetHostData(); err != nil {
+			return nil, err
+		}
+	}
+
+	return processes, nil
+}
+
+// processDescriptors calls each `procFn` of all formatDescriptors on each
+// process and returns the resulting rows of strings.
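+// The first row of the returned slice is the header row.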
+func processDescriptors(formatDescriptors []aixFormatDescriptor, ctx *psContext) ([][]string, error) {
+	data := [][]string{}
+	// create header
+	header := []string{}
+	for _, desc := range formatDescriptors {
+		header = append(header, desc.header)
+	}
+	data = append(data, header)
+
+	// dispatch all descriptor functions on each process
+	for _, proc := range ctx.containersProcesses {
+		pData := []string{}
+		for _, desc := range formatDescriptors {
+			dataStr, err := desc.procFn(proc, ctx)
+			if err != nil {
+				return nil, err
+			}
+			pData = append(pData, dataStr)
+		}
+		data = append(data, pData)
+	}
+
+	return data, nil
+}
+
+// findHostProcess returns the corresponding process from `hostProcesses` or
+// nil if none is found.
+func findHostProcess(p *process.Process, ctx *psContext) *process.Process {
+	for _, hp := range ctx.hostProcesses {
+		// We expect the host process to be in another namespace, so
+		// /proc/$pid/status.NSpid must have at least two entries.
+		if len(hp.Status.NSpid) < 2 {
+			continue
+		}
+		// The process' PID must match the one in the NS of the host
+		// process and both must share the same pid NS.
+		if p.Pid == hp.Status.NSpid[1] && p.PidNS == hp.PidNS {
+			return hp
+		}
+	}
+	return nil
+}
+
+// processGROUP returns the effective group ID of the process. This will be
+// the textual group ID, if it can be obtained, or a decimal representation
+// otherwise.
+func processGROUP(p *process.Process, ctx *psContext) (string, error) {
+	return process.LookupGID(p.Status.Gids[1])
+}
+
+// processGROUPS returns the comma-separated supplementary groups of the
+// process. These will be the textual group IDs, if they can be obtained, or
+// decimal representations otherwise.
+func processGROUPS(p *process.Process, ctx *psContext) (string, error) {
+	var err error
+	groups := make([]string, len(p.Status.Groups))
+	for i, g := range p.Status.Groups {
+		groups[i], err = process.LookupGID(g)
+		if err != nil {
+			return "", err
+		}
+	}
+	return strings.Join(groups, ","), nil
+}
+
+// processRGROUP returns the real group ID of the process. This will be
+// the textual group ID, if it can be obtained, or a decimal representation
+// otherwise.
+func processRGROUP(p *process.Process, ctx *psContext) (string, error) {
+	return process.LookupGID(p.Status.Gids[0])
+}
+
+// processPPID returns the parent process ID of process p.
+func processPPID(p *process.Process, ctx *psContext) (string, error) {
+	return p.Status.PPid, nil
+}
+
+// processUSER returns the effective user name of the process. This will be
+// the textual user ID, if it can be obtained, or a decimal representation
+// otherwise.
+func processUSER(p *process.Process, ctx *psContext) (string, error) {
+	return process.LookupUID(p.Status.Uids[1])
+}
+
+// processRUSER returns the real user name of the process. This will be
+// the textual user ID, if it can be obtained, or a decimal representation
+// otherwise.
+func processRUSER(p *process.Process, ctx *psContext) (string, error) {
+	return process.LookupUID(p.Status.Uids[0])
+}
+
+// processName returns the name of process p in the format "[$name]".
+func processName(p *process.Process, ctx *psContext) (string, error) {
+	return fmt.Sprintf("[%s]", p.Status.Name), nil
+}
+
+// processARGS returns the command of p with all its arguments.
+// processARGS returns the command of p with all its arguments.
+func processARGS(p *process.Process, ctx *psContext) (string, error) {
+	// ps (1) returns "[$name]" if command/args are empty
+	if p.CmdLine[0] == "" {
+		return processName(p, ctx)
+	}
+	return strings.Join(p.CmdLine, " "), nil
+}
+
+// processCOMM returns the command name (i.e., executable name) of process p.
+func processCOMM(p *process.Process, ctx *psContext) (string, error) {
+	return p.Stat.Comm, nil
+}
+
+// processNICE returns the nice value of process p.
+func processNICE(p *process.Process, ctx *psContext) (string, error) {
+	return p.Stat.Nice, nil
+}
+
+// processPID returns the process ID of process p.
+func processPID(p *process.Process, ctx *psContext) (string, error) {
+	return p.Pid, nil
+}
+
+// processPGID returns the process group ID of process p.
+func processPGID(p *process.Process, ctx *psContext) (string, error) {
+	return p.Stat.Pgrp, nil
+}
+
+// processPCPU returns the percentage of CPU time that process p has used,
+// formatted as a float with three decimal places.
+func processPCPU(p *process.Process, ctx *psContext) (string, error) {
+	elapsed, err := p.ElapsedTime()
+	if err != nil {
+		return "", err
+	}
+	cpu, err := p.CPUTime()
+	if err != nil {
+		return "", err
+	}
+	pcpu := 100 * cpu.Seconds() / elapsed.Seconds()
+
+	return strconv.FormatFloat(pcpu, 'f', 3, 64), nil
+}
+
+// processETIME returns the elapsed time since the process was started.
+func processETIME(p *process.Process, ctx *psContext) (string, error) {
+	elapsed, err := p.ElapsedTime()
+	if err != nil {
+		return "", nil
+	}
+	return fmt.Sprintf("%v", elapsed), nil
+}
+
+// processTIME returns the cumulative CPU time of process p.
+func processTIME(p *process.Process, ctx *psContext) (string, error) {
+	cpu, err := p.CPUTime()
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%v", cpu), nil
+}
+
+// processStartTime returns the start time of process p.
+func processStartTime(p *process.Process, ctx *psContext) (string, error) {
+	sTime, err := p.StartTime()
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%v", sTime), nil
+}
+
+// processTTY returns the controlling tty (terminal) of process p.
+func processTTY(p *process.Process, ctx *psContext) (string, error) {
+	ttyNr, err := strconv.ParseUint(p.Stat.TtyNr, 10, 64)
+	if err != nil {
+		return "", nil
+	}
+
+	tty, err := dev.FindTTY(ttyNr, ctx.ttys)
+	if err != nil {
+		return "", nil
+	}
+
+	ttyS := "?"
+	if tty != nil {
+		ttyS = strings.TrimPrefix(tty.Path, "/dev/")
+	}
+	return ttyS, nil
+}
+
+// processVSZ returns the virtual memory size of process p in KiB (1024-byte
+// units).
+func processVSZ(p *process.Process, ctx *psContext) (string, error) {
+	vmsize, err := strconv.Atoi(p.Stat.Vsize)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%d", vmsize/1024), nil
+}
+
+// parseCAP parses cap (a string bit mask) and returns the associated set of
+// capabilities. If all capabilities are set, "full" is returned. If no
+// capability is enabled, "none" is returned.
+func parseCAP(cap string) (string, error) {
+	mask, err := strconv.ParseUint(cap, 16, 64)
+	if err != nil {
+		return "", err
+	}
+	if mask == capabilities.FullCAPs {
+		return "full", nil
+	}
+	caps := capabilities.TranslateMask(mask)
+	if len(caps) == 0 {
+		return "none", nil
+	}
+	sort.Strings(caps)
+	return strings.Join(caps, ","), nil
+}
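`parseCAP` above reduces to a hex parse plus a scan over a capability-name table. A simplified, self-contained stand-in that decodes a mask the same way; the short `capNames` table here is illustrative only, while the vendored `capabilities` package carries the full generated list:

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// A few capability bits, mirroring linux/capability.h.
var capNames = map[uint]string{
	0:  "CHOWN",
	1:  "DAC_OVERRIDE",
	2:  "DAC_READ_SEARCH",
	21: "SYS_ADMIN",
}

// decodeCapMask parses the hex mask as found in /proc/$pid/status
// (CapEff, CapPrm, ...) and lists the set bits it knows about.
func decodeCapMask(hexMask string) (string, error) {
	mask, err := strconv.ParseUint(hexMask, 16, 64)
	if err != nil {
		return "", err
	}
	var caps []string
	for bit, name := range capNames {
		if mask&(1<<bit) != 0 {
			caps = append(caps, name)
		}
	}
	if len(caps) == 0 {
		return "none", nil
	}
	sort.Strings(caps)
	return strings.Join(caps, ","), nil
}

func main() {
	out, _ := decodeCapMask("200007") // bits 0, 1, 2, and 21
	fmt.Println(out)                  // CHOWN,DAC_OVERRIDE,DAC_READ_SEARCH,SYS_ADMIN
}
```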
+// processCAPAMB returns the set of ambient capabilities associated with
+// process p. If all capabilities are set, "full" is returned. If no
+// capability is enabled, "none" is returned.
+func processCAPAMB(p *process.Process, ctx *psContext) (string, error) {
+	return parseCAP(p.Status.CapAmb)
+}
+
+// processCAPINH returns the set of inheritable capabilities associated with
+// process p. If all capabilities are set, "full" is returned. If no
+// capability is enabled, "none" is returned.
+func processCAPINH(p *process.Process, ctx *psContext) (string, error) {
+	return parseCAP(p.Status.CapInh)
+}
+
+// processCAPPRM returns the set of permitted capabilities associated with
+// process p. If all capabilities are set, "full" is returned. If no
+// capability is enabled, "none" is returned.
+func processCAPPRM(p *process.Process, ctx *psContext) (string, error) {
+	return parseCAP(p.Status.CapPrm)
+}
+
+// processCAPEFF returns the set of effective capabilities associated with
+// process p. If all capabilities are set, "full" is returned. If no
+// capability is enabled, "none" is returned.
+func processCAPEFF(p *process.Process, ctx *psContext) (string, error) {
+	return parseCAP(p.Status.CapEff)
+}
+
+// processCAPBND returns the set of bounding capabilities associated with
+// process p. If all capabilities are set, "full" is returned. If no
+// capability is enabled, "none" is returned.
+func processCAPBND(p *process.Process, ctx *psContext) (string, error) {
+	return parseCAP(p.Status.CapBnd)
+}
+
+// processSECCOMP returns the seccomp mode of the process (i.e., disabled,
+// strict or filter) or "?" if /proc/$pid/status.seccomp has an unknown value.
+func processSECCOMP(p *process.Process, ctx *psContext) (string, error) {
+	switch p.Status.Seccomp {
+	case "0":
+		return "disabled", nil
+	case "1":
+		return "strict", nil
+	case "2":
+		return "filter", nil
+	default:
+		return "?", nil
+	}
+}
+
+// processLABEL returns the process label of process p, or "?" if the system
+// doesn't support labeling.
+func processLABEL(p *process.Process, ctx *psContext) (string, error) {
+	return p.Label, nil
+}
+
+// processHPID returns the PID of the corresponding host process of the
+// container process, or "?" if no corresponding process could be found.
+func processHPID(p *process.Process, ctx *psContext) (string, error) {
+	if hp := findHostProcess(p, ctx); hp != nil {
+		return hp.Pid, nil
+	}
+	return "?", nil
+}
+
+// processHUSER returns the effective user ID of the corresponding host
+// process of the container process, or "?" if no corresponding process could
+// be found.
+func processHUSER(p *process.Process, ctx *psContext) (string, error) {
+	if hp := findHostProcess(p, ctx); hp != nil {
+		if ctx.opts != nil && len(ctx.opts.UIDMap) > 0 {
+			return findID(hp.Status.Uids[1], ctx.opts.UIDMap, process.LookupUID, "/proc/sys/fs/overflowuid")
+		}
+		return hp.Huser, nil
+	}
+	return "?", nil
+}
+
+// processHGROUP returns the effective group ID of the corresponding host
+// process of the container process, or "?" if no corresponding process could
+// be found.
+func processHGROUP(p *process.Process, ctx *psContext) (string, error) {
+	if hp := findHostProcess(p, ctx); hp != nil {
+		if ctx.opts != nil && len(ctx.opts.GIDMap) > 0 {
+			return findID(hp.Status.Gids[1], ctx.opts.GIDMap, process.LookupGID, "/proc/sys/fs/overflowgid")
+		}
+		return hp.Hgroup, nil
+	}
+	return "?", nil
+}
+
+// processHGROUPS returns the supplementary groups of the corresponding host
+// process of the container process, or "?" if no corresponding process could
+// be found.
+func processHGROUPS(p *process.Process, ctx *psContext) (string, error) { + if hp := findHostProcess(p, ctx); hp != nil { + groups := hp.Status.Groups + if ctx.opts != nil && len(ctx.opts.GIDMap) > 0 { + var err error + for i, g := range groups { + groups[i], err = findID(g, ctx.opts.GIDMap, process.LookupGID, "/proc/sys/fs/overflowgid") + if err != nil { + return "", err + } + } + } + return strings.Join(groups, ","), nil + } + return "?", nil +} + +// processRSS returns the resident set size of process p in KiB (1024-byte +// units). +func processRSS(p *process.Process, ctx *psContext) (string, error) { + if p.Status.VMRSS == "" { + // probably a kernel thread + return "0", nil + } + return p.Status.VMRSS, nil +} + +// processState returns the process state of process p. +func processState(p *process.Process, ctx *psContext) (string, error) { + return p.Status.State, nil +} diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml new file mode 100644 index 00000000000..fd3d310548d --- /dev/null +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -0,0 +1,177 @@ +--- + +# Main collection of env. vars to set for all tasks and scripts. +env: + #### + #### Global variables used for all tasks + #### + # Overrides default location (/tmp/cirrus) for repo clone + CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/storage" + # Shell used to execute all script commands + CIRRUS_SHELL: "/bin/bash" + # Automation script path relative to $CIRRUS_WORKING_DIR) + SCRIPT_BASE: "./contrib/cirrus" + # No need to go crazy, but grab enough to cover most PRs + CIRRUS_CLONE_DEPTH: 50 + + #### + #### Cache-image names to test with (double-quotes around names are critical) + ### + FEDORA_NAME: "fedora-35" + PRIOR_FEDORA_NAME: "fedora-34" + UBUNTU_NAME: "ubuntu-2104" + + # GCE project where images live + IMAGE_PROJECT: "libpod-218412" + # VM Image built in containers/automation_images + IMAGE_SUFFIX: "c4512539143831552" + FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" + PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" + UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" + + #### + #### Command variables to help avoid duplication + #### + # Command to prefix every output line with a timestamp + # (can't do inline awk script, Cirrus-CI or YAML mangles quoting) + _TIMESTAMP: 'awk --file ${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/timestamp.awk' + _DFCMD: 'df -lhTx tmpfs' + _RAUDITCMD: 'cat /var/log/audit/audit.log' + _UAUDITCMD: 'cat /var/log/kern.log' + _JOURNALCMD: 'journalctl -b' + +gcp_credentials: ENCRYPTED[c87717f04fb15499d19a3b3fa0ad2cdedecc047e82967785d101e9bc418e93219f755e662feac8390088a2df1a4d8464] + +# Default timeout for each task +timeout_in: 120m + +# Default VM to use unless set or modified by task +gce_instance: + image_project: "${IMAGE_PROJECT}" + zone: "us-central1-b" # Required by Cirrus for the time being + cpu: 2 + memory: "4Gb" + disk: 200 + image_name: "${FEDORA_CACHE_IMAGE_NAME}" + + +fedora_testing_task: &fedora_testing + alias: fedora_testing + name: &std_test_name "${OS_NAME} ${TEST_DRIVER}" + depends_on: + - lint + + gce_instance: # Only need to specify differences from defaults (above) + image_name: "${VM_IMAGE}" + + env: + OS_NAME: "${FEDORA_NAME}" + VM_IMAGE: "${FEDORA_CACHE_IMAGE_NAME}" + + # Not all $TEST_DRIVER combinations valid for all $VM_IMAGE types. 
+ matrix: &test_matrix + - env: + TEST_DRIVER: "vfs" + - env: + TEST_DRIVER: "overlay" + - env: + TEST_DRIVER: "fuse-overlay" + - env: + TEST_DRIVER: "fuse-overlay-whiteout" + + # Separate scripts for separate outputs, makes debugging easier. + setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}' + build_and_test_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/build_and_test.sh |& ${_TIMESTAMP}' + + always: + df_script: '${_DFCMD} || true' + rh_audit_log_script: '${_RAUDITCMD} || true' + ubuntu_audit_log_script: '${_UAUDITCMD} || true' + journal_log_script: '${_JOURNALCMD} || true' + + +prior_fedora_testing_task: + <<: *fedora_testing + alias: prior_fedora_testing + name: *std_test_name + env: + OS_NAME: "${PRIOR_FEDORA_NAME}" + VM_IMAGE: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + + +ubuntu_testing_task: &ubuntu_testing + <<: *fedora_testing + alias: ubuntu_testing + name: *std_test_name + env: + OS_NAME: "${UBUNTU_NAME}" + VM_IMAGE: "${UBUNTU_CACHE_IMAGE_NAME}" + matrix: + - env: + TEST_DRIVER: "vfs" + - env: + TEST_DRIVER: "overlay" + + +lint_task: + env: + CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage" + container: + image: golang:1.16 + modules_cache: + fingerprint_script: cat go.sum + folder: $GOPATH/pkg/mod + build_script: | + echo "deb http://deb.debian.org/debian stretch-backports main" > /etc/apt/sources.list.d/backports.list + apt-get update + apt-get install -y libbtrfs-dev libdevmapper-dev + test_script: make lint + + +# Update metadata on VM images referenced by this repository state +meta_task: + + container: + image: "quay.io/libpod/imgts:${IMAGE_SUFFIX}" + cpu: 1 + memory: 1 + + env: + # Space-separated list of images used by this repository state + IMGNAMES: |- + ${FEDORA_CACHE_IMAGE_NAME} + ${PRIOR_FEDORA_CACHE_IMAGE_NAME} + ${UBUNTU_CACHE_IMAGE_NAME} + BUILDID: "${CIRRUS_BUILD_ID}" + REPOREF: "${CIRRUS_CHANGE_IN_REPO}" + GCPJSON: ENCRYPTED[244a93fe8b386b48b96f748342bf741350e43805eee81dd04b45093bdf737e540b993fc735df41f131835fa0f9b65826] + GCPNAME: ENCRYPTED[91cf7aa421858b26b67835978d224b4a5c46afcf52a0f1ec1b69a99b248715dc8e92a1b56fde18e092acf256fa80ae9c] + GCPPROJECT: ENCRYPTED[79b0f7eb5958e25bc7095d5d368fa8d94447a43ffacb9c693de438186e2f767b7efe9563d6954297ae4730220e10aa9c] + CIRRUS_CLONE_DEPTH: 1 # source not used + + script: '/usr/local/bin/entrypoint.sh |& ${_TIMESTAMP}' + + +vendor_task: + container: + image: golang:1.16 + modules_cache: + fingerprint_script: cat go.sum + folder: $GOPATH/pkg/mod + build_script: make vendor + test_script: hack/tree_status.sh + + +# Represent overall pass/fail status from required dependent tasks +success_task: + depends_on: + - lint + - fedora_testing + - prior_fedora_testing + - ubuntu_testing + - meta + - vendor + container: + image: golang:1.16 + clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed + script: /bin/true diff --git a/vendor/github.com/containers/storage/.dockerignore b/vendor/github.com/containers/storage/.dockerignore new file mode 100644 index 00000000000..9bd2c021930 --- /dev/null +++ b/vendor/github.com/containers/storage/.dockerignore @@ -0,0 +1,3 @@ +bundles +.gopath +vendor/pkg diff --git a/vendor/github.com/containers/storage/.gitignore b/vendor/github.com/containers/storage/.gitignore new file mode 100644 index 00000000000..99b40fbde50 --- /dev/null +++ b/vendor/github.com/containers/storage/.gitignore @@ -0,0 +1,32 @@ +# containers/storage project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global 
.gitignore https://help.github.com/articles/ignoring-files +*.1 +*.5 +*.exe +*~ +*.orig +*.test +.*.swp +.DS_Store +.idea* +# a .bashrc may be added to customize the build environment +.bashrc +.gopath/ +docs/AWS_S3_BUCKET +docs/GITCOMMIT +docs/GIT_BRANCH +docs/VERSION +docs/_build +docs/_static +docs/_templates +docs/changed-files +# generated by man/md2man-all.sh +man/man1 +man/man5 +man/man8 +tests/tools/build +vendor/pkg/ +.vagrant +/containers-storage +/containers-storage.* diff --git a/vendor/github.com/containers/storage/.golangci.yml b/vendor/github.com/containers/storage/.golangci.yml new file mode 100644 index 00000000000..cd4638a39c2 --- /dev/null +++ b/vendor/github.com/containers/storage/.golangci.yml @@ -0,0 +1,37 @@ +--- +run: + concurrency: 6 + deadline: 5m +linters: + enable-all: true + disable: + - dogsled + - dupl + - errcheck + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - gocritic + - gocyclo + - godox + - gomnd + - gosec + - gosimple + - govet + - ineffassign + - lll + - maligned + - misspell + - nakedret + - prealloc + - scopelint + - staticcheck + - structcheck + - stylecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace + - wsl diff --git a/vendor/github.com/containers/storage/.mailmap b/vendor/github.com/containers/storage/.mailmap new file mode 100644 index 00000000000..0527b6d84d6 --- /dev/null +++ b/vendor/github.com/containers/storage/.mailmap @@ -0,0 +1,254 @@ +# Generate AUTHORS: hack/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog + +Patrick Stapleton +Shishir Mahajan +Erwin van der Koogh +Ahmed Kamal +Tejesh Mehta +Cristian Staretu +Cristian Staretu +Cristian Staretu +Marcus Linke +Aleksandrs Fadins +Christopher Latham +Hu Keping +Wayne Chang +Chen Chao +Daehyeok Mun + + + + + + +Guillaume J. Charmes + + + + + +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens dhrp +Jérôme Petazzoni jpetazzo +Jérôme Petazzoni +Joffrey F +Joffrey F +Joffrey F +Tim Terhorst +Andy Smith + + + + + + + + + +Walter Stanish + +Roberto Hashioka +Konstantin Pelykh +David Sissitka +Nolan Darilek + +Benoit Chesneau +Jordan Arentsen +Daniel Garcia +Miguel Angel Fernández +Bhiraj Butala +Faiz Khan +Victor Lyuboslavsky +Jean-Baptiste Barth +Matthew Mueller + +Shih-Yuan Lee +Daniel Mizyrycki root +Jean-Baptiste Dalido + + + + + + + + + + + + + + +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sven Dowideit +Sven Dowideit +Sven Dowideit + +Alexander Morozov +Alexander Morozov + +O.S. Tezer + +Roberto G. 
Hashioka + + + + + +Sridhar Ratnakumar +Sridhar Ratnakumar +Liang-Chi Hsieh +Aleksa Sarai +Aleksa Sarai +Aleksa Sarai +Will Weaver +Timothy Hobbs +Nathan LeClaire +Nathan LeClaire + + + + +Matthew Heon + + + + +Francisco Carriedo + + + + +Brian Goff + + + +Hollie Teal + + + +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle + + + + +Thomas LEVEIL Thomas LÉVEIL + + +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Darren Shepherd +Deshi Xiao +Deshi Xiao +Doug Davis +Jacob Atzen +Jeff Nickoloff +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +Madhu Venugopal +Mary Anthony +Mary Anthony moxiegirl +Mary Anthony +mattyw +resouer +AJ Bowen soulshake +AJ Bowen soulshake +Tibor Vass +Tibor Vass +Vincent Bernat +Yestin Sun +bin liu +John Howard (VM) jhowardmsft +Ankush Agarwal +Tangi COLIN tangicolin +Allen Sun +Adrien Gallouët + +Anuj Bahuguna +Anusha Ragunathan +Avi Miller +Brent Salisbury +Chander G +Chun Chen +Ying Li +Daehyeok Mun + +Daniel, Dao Quang Minh +Daniel Nephin +Dave Tucker +Doug Tangren +Frederick F. Kautz IV +Ben Golub +Harold Cooper +hsinko <21551195@zju.edu.cn> +Josh Hawn +Justin Cormack + + +Kamil Domański +Lei Jitang + +Linus Heckemann + +Lynda O'Leary + +Marianna Tessel +Michael Huettermann +Moysés Borges + +Nigel Poulton +Qiang Huang + +Boaz Shuster +Shuwei Hao + +Soshi Katsuta + +Stefan Berger + +Stephen Day + +Toli Kuznets +Tristan Carel + +Vincent Demeester + +Vishnu Kannan +xlgao-zju xlgao +yuchangchun y00277921 + + + + +Hao Shu Wei + + + + + + + +Shengbo Song mYmNeo +Shengbo Song + +Sylvain Bellemare + diff --git a/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md new file mode 100644 index 00000000000..f4f7df4b8cb --- /dev/null +++ b/vendor/github.com/containers/storage/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## The Containers Storage Project Community Code of Conduct + +The Containers Storage project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md). diff --git a/vendor/github.com/containers/storage/CONTRIBUTING.md b/vendor/github.com/containers/storage/CONTRIBUTING.md new file mode 100644 index 00000000000..5364be769ed --- /dev/null +++ b/vendor/github.com/containers/storage/CONTRIBUTING.md @@ -0,0 +1,144 @@ +# Contributing to Containers/Storage + +We'd love to have you join the community! Below summarizes the processes +that we follow. + +## Topics + +* [Reporting Issues](#reporting-issues) +* [Submitting Pull Requests](#submitting-pull-requests) +* [Communications](#communications) + + +## Reporting Issues + +Before reporting an issue, check our backlog of +[open issues](https://github.com/containers/storage/issues) +to see if someone else has already reported it. If so, feel free to add +your scenario, or additional information, to the discussion. Or simply +"subscribe" to it to be notified when it is updated. + +If you find a new issue with the project we'd love to hear about it! The most +important aspect of a bug report is that it includes enough information for +us to reproduce it. So, please include as much detail as possible and try +to remove the extra stuff that doesn't really relate to the issue itself. +The easier it is for us to reproduce it, the faster it'll be fixed! + +Please don't include any private/sensitive information in your issue! + +## Submitting Pull Requests + +No Pull Request (PR) is too small! 
Typos, additional comments in the code,
+new testcases, bug fixes, new features, more documentation, ... it's all
+welcome!
+
+While bug fixes can first be identified via an "issue", that is not required.
+It's ok to just open up a PR with the fix, but make sure you include the same
+information you would have included in an issue - like how to reproduce it.
+
+PRs for new features should include some background on what use cases the
+new code is trying to address. When possible and when it makes sense, try to
+break up larger PRs into smaller ones - it's easier to review smaller
+code changes. But only if those smaller ones make sense as stand-alone PRs.
+
+Regardless of the type of PR, all PRs should include:
+* well-documented code changes
+* additional testcases. Ideally, they should fail without your code change applied
+* documentation changes
+
+Squash your commits into logical pieces of work that might want to be reviewed
+separately from the rest of the PRs. But squashing down to just one commit is ok
+too, since in the end the entire PR will be reviewed anyway. When in doubt,
+squash.
+
+PRs that fix issues should include a reference like `Closes #XXXX` in the
+commit message so that GitHub will automatically close the referenced issue
+when the PR is merged.
+
+
+
+### Sign your PRs
+
+The sign-off is a line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions).
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
+
+## Communications
+
+For general questions, or discussions, please use the
+IRC group on `irc.freenode.net` called `container-projects`
+that has been set up.
+ +For discussions around issues/bugs and features, you can use the github +[issues](https://github.com/containers/storage/issues) +and +[PRs](https://github.com/containers/storage/pulls) +tracking system. + + diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile new file mode 100644 index 00000000000..2c1e4a18566 --- /dev/null +++ b/vendor/github.com/containers/storage/Makefile @@ -0,0 +1,125 @@ +export GO111MODULE=off +export GOPROXY=https://proxy.golang.org + +.PHONY: \ + all \ + binary \ + clean \ + cross \ + default \ + docs \ + gccgo \ + help \ + install.tools \ + local-binary \ + local-cross \ + local-gccgo \ + local-test-integration \ + local-test-unit \ + local-validate \ + lint \ + test \ + test-integration \ + test-unit \ + validate \ + vendor + +PACKAGE := github.com/containers/storage +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") +EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73 +NATIVETAGS := +AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh) +BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS) +GO ?= go +TESTFLAGS := $(shell go test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race) + +# Go module support: set `-mod=vendor` to use the vendored sources +ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true) + GO:=GO111MODULE=on $(GO) + MOD_VENDOR=-mod=vendor +endif + +RUNINVM := vagrant/runinvm.sh + +default all: local-binary docs local-validate local-cross local-gccgo test-unit test-integration ## validate all checks, build and cross-build\nbinaries and docs, run tests in a VM + +clean: ## remove all built files + $(RM) -f containers-storage containers-storage.* docs/*.1 docs/*.5 + +sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.go pkg/*/*.go pkg/*/*/*.go) +containers-storage: $(sources) ## build using gc on the host + $(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage + +codespell: + codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L flate,uint,iff,od,ERRO -w + +binary local-binary: containers-storage + +local-gccgo: ## build using gccgo on the host + GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage + +local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd + @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 ; do \ + os=`echo $${target} | cut -f1 -d/` ; \ + arch=`echo $${target} | cut -f2 -d/` ; \ + suffix=$${os}.$${arch} ; \ + echo env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags \"$(NATIVETAGS) $(TAGS)\" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage ; \ + env CGO_ENABLED=0 GOOS=$${os} GOARCH=$${arch} $(GO) build $(MOD_VENDOR) -compiler gc -tags "$(NATIVETAGS) $(TAGS)" $(FLAGS) -o containers-storage.$${suffix} ./cmd/containers-storage || exit 1 ; \ + done + +cross: ## cross build the binaries for arm, darwin, and\nfreebsd using VMs + $(RUNINVM) $(MAKE) local-$@ + +docs: install.tools ## build the docs on the host + $(MAKE) -C docs docs + +gccgo: ## build using gccgo using VMs + $(RUNINVM) 
$(MAKE) local-$@ + +test: local-binary ## build the binaries and run the tests using VMs + $(RUNINVM) $(MAKE) local-binary local-cross local-test-unit local-test-integration + +local-test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges) + @$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor) + +test-unit: local-binary ## run the unit tests using VMs + $(RUNINVM) $(MAKE) local-$@ + +local-test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges) + @cd tests; ./test_runner.bash + +test-integration: local-binary ## run the integration tests using VMs + $(RUNINVM) $(MAKE) local-$@ + +local-validate: ## validate DCO and gofmt on the host + @./hack/git-validation.sh + @./hack/gofmt.sh + +validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor using VMs + $(RUNINVM) $(MAKE) local-$@ + +install.tools: + $(MAKE) -C tests/tools + +$(FFJSON): + $(MAKE) -C tests/tools + +install.docs: docs + $(MAKE) -C docs install + +install: install.docs + +lint: install.tools + tests/tools/build/golangci-lint run --build-tags="$(AUTOTAGS) $(TAGS)" + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +vendor-in-container: + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor + +vendor: + $(GO) mod tidy + $(GO) mod vendor + $(GO) mod verify diff --git a/vendor/github.com/containers/storage/README.md b/vendor/github.com/containers/storage/README.md new file mode 100644 index 00000000000..fef46a68931 --- /dev/null +++ b/vendor/github.com/containers/storage/README.md @@ -0,0 +1,46 @@ +`storage` is a Go library which aims to provide methods for storing filesystem +layers, container images, and containers. A `containers-storage` CLI wrapper +is also included for manual and scripting use. + +To build the CLI wrapper, use 'make binary'. + +Operations which use VMs expect to launch them using 'vagrant', defaulting to +using its 'libvirt' provider. The boxes used are also available for the +'virtualbox' provider, and can be selected by setting $VAGRANT_PROVIDER to +'virtualbox' before kicking off the build. + +The library manages three types of items: layers, images, and containers. + +A *layer* is a copy-on-write filesystem which is notionally stored as a set of +changes relative to its *parent* layer, if it has one. A given layer can only +have one parent, but any layer can be the parent of multiple layers. Layers +which are parents of other layers should be treated as read-only. + +An *image* is a reference to a particular layer (its _top_ layer), along with +other information which the library can manage for the convenience of its +caller. This information typically includes configuration templates for +running a binary contained within the image's layers, and may include +cryptographic signatures. Multiple images can reference the same layer, as the +differences between two images may not be in their layer contents. + +A *container* is a read-write layer which is a child of an image's top layer, +along with information which the library can manage for the convenience of its +caller. This information typically includes configuration information for +running the specific container. Multiple containers can be derived from a +single image. 
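A toy sketch of the layer/image/container relationships the README describes; these are illustrative types only, not the library's actual API:

```go
package main

import "fmt"

// Toy model: the real types in this library carry many more fields
// (names, metadata, big data, ID mappings, ...).
type Layer struct {
	ID     string
	Parent *Layer // nil for a base layer; parents should be read-only
}

type Image struct {
	ID       string
	TopLayer *Layer
}

type Container struct {
	ID    string
	Image *Image
	RW    *Layer // read-write child of the image's top layer
}

func main() {
	base := &Layer{ID: "base"}
	top := &Layer{ID: "top", Parent: base}
	img := &Image{ID: "img1", TopLayer: top}
	ctr := &Container{ID: "ctr1", Image: img, RW: &Layer{ID: "rw", Parent: top}}

	// Walk the container's copy-on-write chain down to the base layer.
	for l := ctr.RW; l != nil; l = l.Parent {
		fmt.Println(l.ID)
	}
}
```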
+ +Layers, images, and containers are represented primarily by 32 character +hexadecimal IDs, but items of each kind can also have one or more arbitrary +names attached to them, which the library will automatically resolve to IDs +when they are passed in to API calls which expect IDs. + +The library can store what it calls *metadata* for each of these types of +items. This is expected to be a small piece of data, since it is cached in +memory and stored along with the library's own bookkeeping information. + +Additionally, the library can store one or more of what it calls *big data* for +images and containers. This is a named chunk of larger data, which is only in +memory when it is being read from or being written to its own disk file. + +**[Contributing](CONTRIBUTING.md)** +Information about contributing to this project. diff --git a/vendor/github.com/containers/storage/SECURITY.md b/vendor/github.com/containers/storage/SECURITY.md new file mode 100644 index 00000000000..ab2c14182fd --- /dev/null +++ b/vendor/github.com/containers/storage/SECURITY.md @@ -0,0 +1,3 @@ +## Security and Disclosure Information Policy for the Containers Storage Project + +The Containers Storage Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects. diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION new file mode 100644 index 00000000000..148dabb457e --- /dev/null +++ b/vendor/github.com/containers/storage/VERSION @@ -0,0 +1 @@ +1.40.2 diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go new file mode 100644 index 00000000000..a8b20f03a01 --- /dev/null +++ b/vendor/github.com/containers/storage/containers.go @@ -0,0 +1,663 @@ +package storage + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/truncindex" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// A Container is a reference to a read-write layer with metadata. +type Container struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Names is an optional set of user-defined convenience values. The + // container can be referred to by its ID or any of its names. Names + // are unique among containers. + Names []string `json:"names,omitempty"` + + // ImageID is the ID of the image which was used to create the container. + ImageID string `json:"image"` + + // LayerID is the ID of the read-write layer for the container itself. + // It is assumed that the image's top layer is the parent of the container's + // read-write layer. + LayerID string `json:"layer"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // BigDataNames is a list of names of data items that we keep for the + // convenience of the caller. They can be large, and are only in + // memory when being read from or written to disk. + BigDataNames []string `json:"big-data-names,omitempty"` + + // BigDataSizes maps the names in BigDataNames to the sizes of the data + // that has been stored, if they're known. 
+ BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + + // BigDataDigests maps the names in BigDataNames to the digests of the + // data that has been stored, if they're known. + BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` + + // Created is the datestamp for when this container was created. Older + // versions of the library did not track this information, so callers + // will likely want to use the IsZero() method to verify that a value + // is set before using it. + Created time.Time `json:"created,omitempty"` + + // UIDMap and GIDMap are used for setting up a container's root + // filesystem for use inside of a user namespace where UID mapping is + // being used. + UIDMap []idtools.IDMap `json:"uidmap,omitempty"` + GIDMap []idtools.IDMap `json:"gidmap,omitempty"` + + Flags map[string]interface{} `json:"flags,omitempty"` +} + +// ContainerStore provides bookkeeping for information about Containers. +type ContainerStore interface { + FileBasedStore + MetadataStore + ContainerBigDataStore + FlaggableStore + + // Create creates a container that has a specified ID (or generates a + // random one if an empty value is supplied) and optional names, + // based on the specified image, using the specified layer as its + // read-write layer. + // The maps in the container's options structure are recorded for the + // convenience of the caller, nothing more. + Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) + + // SetNames updates the list of names associated with the container + // with the specified ID. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. + SetNames(id string, names []string) error + + // AddNames adds the supplied values to the list of names associated with the container with + // the specified id. + AddNames(id string, names []string) error + + // RemoveNames removes the supplied values from the list of names associated with the container with + // the specified id. + RemoveNames(id string, names []string) error + + // Get retrieves information about a container given an ID or name. + Get(id string) (*Container, error) + + // Exists checks if there is a container with the given ID or name. + Exists(id string) bool + + // Delete removes the record of the container. + Delete(id string) error + + // Wipe removes records of all containers. + Wipe() error + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. + Lookup(name string) (string, error) + + // Containers returns a slice enumerating the known containers. 
+ Containers() ([]Container, error) +} + +type containerStore struct { + lockfile Locker + dir string + containers []*Container + idindex *truncindex.TruncIndex + byid map[string]*Container + bylayer map[string]*Container + byname map[string]*Container + loadMut sync.Mutex +} + +func copyContainer(c *Container) *Container { + return &Container{ + ID: c.ID, + Names: copyStringSlice(c.Names), + ImageID: c.ImageID, + LayerID: c.LayerID, + Metadata: c.Metadata, + BigDataNames: copyStringSlice(c.BigDataNames), + BigDataSizes: copyStringInt64Map(c.BigDataSizes), + BigDataDigests: copyStringDigestMap(c.BigDataDigests), + Created: c.Created, + UIDMap: copyIDMap(c.UIDMap), + GIDMap: copyIDMap(c.GIDMap), + Flags: copyStringInterfaceMap(c.Flags), + } +} + +func (c *Container) MountLabel() string { + if label, ok := c.Flags["MountLabel"].(string); ok { + return label + } + return "" +} + +func (c *Container) ProcessLabel() string { + if label, ok := c.Flags["ProcessLabel"].(string); ok { + return label + } + return "" +} + +func (c *Container) MountOpts() []string { + switch c.Flags["MountOpts"].(type) { + case []string: + return c.Flags["MountOpts"].([]string) + case []interface{}: + var mountOpts []string + for _, v := range c.Flags["MountOpts"].([]interface{}) { + if flag, ok := v.(string); ok { + mountOpts = append(mountOpts, flag) + } + } + return mountOpts + default: + return nil + } +} + +func (r *containerStore) Containers() ([]Container, error) { + containers := make([]Container, len(r.containers)) + for i := range r.containers { + containers[i] = *copyContainer(r.containers[i]) + } + return containers, nil +} + +func (r *containerStore) containerspath() string { + return filepath.Join(r.dir, "containers.json") +} + +func (r *containerStore) datadir(id string) string { + return filepath.Join(r.dir, id) +} + +func (r *containerStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +func (r *containerStore) Load() error { + needSave := false + rpath := r.containerspath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + containers := []*Container{} + layers := make(map[string]*Container) + idlist := []string{} + ids := make(map[string]*Container) + names := make(map[string]*Container) + if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(containers)) + for n, container := range containers { + idlist = append(idlist, container.ID) + ids[container.ID] = containers[n] + layers[container.LayerID] = containers[n] + for _, name := range container.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + needSave = true + } + names[name] = containers[n] + } + } + } + r.containers = containers + r.idindex = truncindex.NewTruncIndex(idlist) + r.byid = ids + r.bylayer = layers + r.byname = names + if needSave { + return r.Save() + } + return nil +} + +func (r *containerStore) Save() error { + if !r.Locked() { + return errors.New("container store is not locked") + } + rpath := r.containerspath() + if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + return err + } + jdata, err := json.Marshal(&r.containers) + if err != nil { + return err + } + defer r.Touch() + return ioutils.AtomicWriteFile(rpath, jdata, 0600) +} + +func newContainerStore(dir string) (ContainerStore, error) { + if err := os.MkdirAll(dir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock")) 
+ if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + cstore := containerStore{ + lockfile: lockfile, + dir: dir, + containers: []*Container{}, + byid: make(map[string]*Container), + bylayer: make(map[string]*Container), + byname: make(map[string]*Container), + } + if err := cstore.Load(); err != nil { + return nil, err + } + return &cstore, nil +} + +func (r *containerStore) lookup(id string) (*Container, bool) { + if container, ok := r.byid[id]; ok { + return container, ok + } else if container, ok := r.byname[id]; ok { + return container, ok + } else if container, ok := r.bylayer[id]; ok { + return container, ok + } else if longid, err := r.idindex.Get(id); err == nil { + if container, ok := r.byid[longid]; ok { + return container, ok + } + } + return nil, false +} + +func (r *containerStore) ClearFlag(id string, flag string) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + delete(container.Flags, flag) + return r.Save() +} + +func (r *containerStore) SetFlag(id string, flag string, value interface{}) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + if container.Flags == nil { + container.Flags = make(map[string]interface{}) + } + container.Flags[flag] = value + return r.Save() +} + +func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) { + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, ErrDuplicateID + } + if options.MountOpts != nil { + options.Flags["MountOpts"] = append([]string{}, options.MountOpts...) + } + if options.Volatile { + options.Flags["Volatile"] = true + } + names = dedupeNames(names) + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, errors.Wrapf(ErrDuplicateName, + fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". 
You have to remove that container to be able to reuse that name.", name, r.byname[name].ID)) + } + } + if err := hasOverlappingRanges(options.UIDMap); err != nil { + return nil, err + } + if err := hasOverlappingRanges(options.GIDMap); err != nil { + return nil, err + } + if err == nil { + container = &Container{ + ID: id, + Names: names, + ImageID: image, + LayerID: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: time.Now().UTC(), + Flags: copyStringInterfaceMap(options.Flags), + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + } + r.containers = append(r.containers, container) + r.byid[id] = container + r.idindex.Add(id) + r.bylayer[layer] = container + for _, name := range names { + r.byname[name] = container + } + err = r.Save() + container = copyContainer(container) + } + return container, err +} + +func (r *containerStore) Metadata(id string) (string, error) { + if container, ok := r.lookup(id); ok { + return container.Metadata, nil + } + return "", ErrContainerUnknown +} + +func (r *containerStore) SetMetadata(id, metadata string) error { + if container, ok := r.lookup(id); ok { + container.Metadata = metadata + return r.Save() + } + return ErrContainerUnknown +} + +func (r *containerStore) removeName(container *Container, name string) { + container.Names = stringSliceWithoutValue(container.Names, name) +} + +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. +func (r *containerStore) SetNames(id string, names []string) error { + return r.updateNames(id, names, setNames) +} + +func (r *containerStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *containerStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + oldNames := container.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherContainer, ok := r.byname[name]; ok { + r.removeName(otherContainer, name) + } + r.byname[name] = container + } + container.Names = names + return r.Save() +} + +func (r *containerStore) Delete(id string) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + id = container.ID + toDeleteIndex := -1 + for i, candidate := range r.containers { + if candidate.ID == id { + toDeleteIndex = i + break + } + } + delete(r.byid, id) + r.idindex.Delete(id) + delete(r.bylayer, container.LayerID) + for _, name := range container.Names { + delete(r.byname, name) + } + if toDeleteIndex != -1 { + // delete the container at toDeleteIndex + if toDeleteIndex == len(r.containers)-1 { + r.containers = r.containers[:len(r.containers)-1] + } else { + r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...) 
+ } + } + if err := r.Save(); err != nil { + return err + } + if err := os.RemoveAll(r.datadir(id)); err != nil { + return err + } + return nil +} + +func (r *containerStore) Get(id string) (*Container, error) { + if container, ok := r.lookup(id); ok { + return copyContainer(container), nil + } + return nil, ErrContainerUnknown +} + +func (r *containerStore) Lookup(name string) (id string, err error) { + if container, ok := r.lookup(name); ok { + return container.ID, nil + } + return "", ErrContainerUnknown +} + +func (r *containerStore) Exists(id string) bool { + _, ok := r.lookup(id) + return ok +} + +func (r *containerStore) BigData(id, key string) ([]byte, error) { + if key == "" { + return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name") + } + c, ok := r.lookup(id) + if !ok { + return nil, ErrContainerUnknown + } + return ioutil.ReadFile(r.datapath(c.ID, key)) +} + +func (r *containerStore) BigDataSize(id, key string) (int64, error) { + if key == "" { + return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name") + } + c, ok := r.lookup(id) + if !ok { + return -1, ErrContainerUnknown + } + if c.BigDataSizes == nil { + c.BigDataSizes = make(map[string]int64) + } + if size, ok := c.BigDataSizes[key]; ok { + return size, nil + } + if data, err := r.BigData(id, key); err == nil && data != nil { + if err = r.SetBigData(id, key, data); err == nil { + c, ok := r.lookup(id) + if !ok { + return -1, ErrContainerUnknown + } + if size, ok := c.BigDataSizes[key]; ok { + return size, nil + } + } else { + return -1, err + } + } + return -1, ErrSizeUnknown +} + +func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) { + if key == "" { + return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name") + } + c, ok := r.lookup(id) + if !ok { + return "", ErrContainerUnknown + } + if c.BigDataDigests == nil { + c.BigDataDigests = make(map[string]digest.Digest) + } + if d, ok := c.BigDataDigests[key]; ok { + return d, nil + } + if data, err := r.BigData(id, key); err == nil && data != nil { + if err = r.SetBigData(id, key, data); err == nil { + c, ok := r.lookup(id) + if !ok { + return "", ErrContainerUnknown + } + if d, ok := c.BigDataDigests[key]; ok { + return d, nil + } + } else { + return "", err + } + } + return "", ErrDigestUnknown +} + +func (r *containerStore) BigDataNames(id string) ([]string, error) { + c, ok := r.lookup(id) + if !ok { + return nil, ErrContainerUnknown + } + return copyStringSlice(c.BigDataNames), nil +} + +func (r *containerStore) SetBigData(id, key string, data []byte) error { + if key == "" { + return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item") + } + c, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + if err := os.MkdirAll(r.datadir(c.ID), 0700); err != nil { + return err + } + err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600) + if err == nil { + save := false + if c.BigDataSizes == nil { + c.BigDataSizes = make(map[string]int64) + } + oldSize, sizeOk := c.BigDataSizes[key] + c.BigDataSizes[key] = int64(len(data)) + if c.BigDataDigests == nil { + c.BigDataDigests = make(map[string]digest.Digest) + } + oldDigest, digestOk := c.BigDataDigests[key] + newDigest := digest.Canonical.FromBytes(data) + c.BigDataDigests[key] = newDigest + if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest { + save = true + } 
+ addName := true + for _, name := range c.BigDataNames { + if name == key { + addName = false + break + } + } + if addName { + c.BigDataNames = append(c.BigDataNames, key) + save = true + } + if save { + err = r.Save() + } + } + return err +} + +func (r *containerStore) Wipe() error { + ids := make([]string, 0, len(r.byid)) + for id := range r.byid { + ids = append(ids, id) + } + for _, id := range ids { + if err := r.Delete(id); err != nil { + return err + } + } + return nil +} + +func (r *containerStore) Lock() { + r.lockfile.Lock() +} + +func (r *containerStore) RecursiveLock() { + r.lockfile.RecursiveLock() +} + +func (r *containerStore) RLock() { + r.lockfile.RLock() +} + +func (r *containerStore) Unlock() { + r.lockfile.Unlock() +} + +func (r *containerStore) Touch() error { + return r.lockfile.Touch() +} + +func (r *containerStore) Modified() (bool, error) { + return r.lockfile.Modified() +} + +func (r *containerStore) IsReadWrite() bool { + return r.lockfile.IsReadWrite() +} + +func (r *containerStore) TouchedSince(when time.Time) bool { + return r.lockfile.TouchedSince(when) +} + +func (r *containerStore) Locked() bool { + return r.lockfile.Locked() +} + +func (r *containerStore) ReloadIfChanged() error { + r.loadMut.Lock() + defer r.loadMut.Unlock() + + modified, err := r.Modified() + if err == nil && modified { + return r.Load() + } + return err +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go new file mode 100644 index 00000000000..e66613c098a --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -0,0 +1,768 @@ +//go:build linux +// +build linux + +/* + +aufs driver directory structure + + . + ├── layers // Metadata of layers + │ ├── 1 + │ ├── 2 + │ └── 3 + ├── diff // Content of the layer + │ ├── 1 // Contains layers that need to be mounted for the id + │ ├── 2 + │ └── 3 + └── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "io" + "io/fs" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "time" + + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/locker" + mountpk "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/system" + "github.com/opencontainers/runc/libcontainer/userns" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + "golang.org/x/sys/unix" +) + +var ( + // ErrAufsNotSupported is returned if aufs is not supported by the host. + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + // ErrAufsNested means aufs cannot be used bc we are in a user namespace + ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") + backingFs = "" + + enableDirpermLock sync.Once + enableDirperm bool +) + +const defaultPerms = os.FileMode(0555) + +func init() { + graphdriver.Register("aufs", Init) +} + +// Driver contains information about the filesystem mounted. 
+type Driver struct { + sync.Mutex + root string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + pathCacheLock sync.Mutex + pathCache map[string]string + naiveDiff graphdriver.DiffDriver + locker *locker.Locker + mountOptions string +} + +// Init returns a new AUFS driver. +// An error is returned if AUFS is not supported. +func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { + + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs") + + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("AUFS is not supported over %s", backingFs) + return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs) + } + + var mountOptions string + for _, option := range options.DriverOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "aufs.mountopt": + mountOptions = val + default: + return nil, fmt.Errorf("option %s not supported", option) + } + } + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: home, + uidMaps: options.UIDMaps, + gidMaps: options.GIDMaps, + pathCache: make(map[string]string), + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), + locker: locker.New(), + mountOptions: mountOptions, + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return nil, err + } + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := mountpk.MakePrivate(home); err != nil { + return nil, err + } + + // Populate the dir structure + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(home, p), 0700, rootUID, rootGID); err != nil { + return nil, err + } + } + logger := logrus.WithFields(logrus.Fields{ + "module": "graphdriver", + "driver": "aufs", + }) + + for _, path := range []string{"mnt", "diff"} { + p := filepath.Join(home, path) + entries, err := ioutil.ReadDir(p) + if err != nil { + logger.WithError(err).WithField("dir", p).Error("error reading dir entries") + continue + } + for _, entry := range entries { + if !entry.IsDir() { + continue + } + if strings.HasSuffix(entry.Name(), "-removing") { + logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir") + if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil { + logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir") + } + } + } + } + + a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, a) + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + if userns.RunningInUserNS() { + return ErrAufsNested + } + + f, err := os.Open("/proc/filesystems") + 
if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a *Driver) rootPath() string { + return a.root +} + +func (*Driver) String() string { + return "aufs" +} + +// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. +func (a *Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Backing Filesystem", backingFs}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + } +} + +// Metadata not implemented +func (a *Driver) Metadata(id string) (map[string]string, error) { + return nil, nil +} + +// Exists returns true if the given id is registered with +// this driver +func (a *Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (a *Driver) AdditionalImageStores() []string { + return nil +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (a *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + if opts == nil { + opts = &graphdriver.CreateOpts{} + } + return graphdriver.NaiveCreateFromTemplate(a, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return a.Create(id, parent, opts) +} + +// Create three folders for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for aufs") + } + + if err := a.createDirsFor(id, parent); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIDs(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + + return nil +} + +// createDirsFor creates two directories for the given id. +// mnt and diff +func (a *Driver) createDirsFor(id, parent string) error { + paths := []string{ + "mnt", + "diff", + } + + // Directory permission is 0555. + // The path of directories are /mnt/ + // and /diff/ + for _, p := range paths { + rootPair := idtools.NewIDMappingsFromMaps(a.uidMaps, a.gidMaps).RootPair() + rootPerms := defaultPerms + if parent != "" { + st, err := system.Stat(path.Join(a.rootPath(), p, parent)) + if err != nil { + return err + } + rootPerms = os.FileMode(st.Mode()) + rootPair.UID = int(st.UID()) + rootPair.GID = int(st.GID()) + } + if err := idtools.MkdirAllAndChownNew(path.Join(a.rootPath(), p, id), rootPerms, rootPair); err != nil { + return err + } + } + return nil +} + +// Remove will unmount and remove the given id. 
+func (a *Driver) Remove(id string) error { + a.locker.Lock(id) + defer a.locker.Unlock(id) + a.pathCacheLock.Lock() + mountpoint, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + if !exists { + mountpoint = a.getMountpoint(id) + } + + logger := logrus.WithFields(logrus.Fields{ + "module": "graphdriver", + "driver": "aufs", + "layer": id, + }) + + var retries int + for { + mounted, err := a.mounted(mountpoint) + if err != nil { + if os.IsNotExist(err) { + break + } + return err + } + if !mounted { + break + } + + err = a.unmount(mountpoint) + if err == nil { + break + } + + if err != unix.EBUSY { + return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint) + } + if retries >= 5 { + return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint) + } + // If unmount returns EBUSY, it could be a transient error. Sleep and retry. + retries++ + logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries) + time.Sleep(100 * time.Millisecond) + } + + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "error removing layers dir for %s", id) + } + + if err := atomicRemove(a.getDiffPath(id)); err != nil { + return errors.Wrapf(err, "could not remove diff path for id %s", id) + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that container runtime doesn't find it anymore) before doing removal of + // the whole tree. + if err := atomicRemove(mountpoint); err != nil { + if errors.Cause(err) == unix.EBUSY { + logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY") + } + return errors.Wrapf(err, "could not remove mountpoint for id %s", id) + } + + a.pathCacheLock.Lock() + delete(a.pathCache, id) + a.pathCacheLock.Unlock() + return nil +} + +func atomicRemove(source string) error { + target := source + "-removing" + + err := os.Rename(source, target) + switch { + case err == nil, os.IsNotExist(err): + case os.IsExist(err): + // Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove + if _, e := os.Stat(source); !os.IsNotExist(e) { + return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target) + } + default: + return errors.Wrapf(err, "error preparing atomic delete") + } + + return system.EnsureRemoveAll(target) +} + +// Get returns the rootfs path for the id. +// This will mount the dir at its given path +func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + a.locker.Lock(id) + defer a.locker.Unlock(id) + parents, err := a.getParentLayerPaths(id) + if err != nil && !os.IsNotExist(err) { + return "", err + } + + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + + if !exists { + m = a.getDiffPath(id) + if len(parents) > 0 { + m = a.getMountpoint(id) + } + } + if count := a.ctr.Increment(m); count > 1 { + return m, nil + } + + // If a dir does not have a parent ( no layers )do not try to mount + // just return the diff path to the data + if len(parents) > 0 { + if err := a.mount(id, m, parents, options); err != nil { + return "", err + } + } + + a.pathCacheLock.Lock() + a.pathCache[id] = m + a.pathCacheLock.Unlock() + return m, nil +} + +// Put unmounts and updates list of active mounts. 
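+// The underlying unmount only happens once the reference count taken in
+// Get drops to zero; earlier calls just decrement the counter and return.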
+func (a *Driver) Put(id string) error { + a.locker.Lock(id) + defer a.locker.Unlock(id) + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + if !exists { + m = a.getMountpoint(id) + a.pathCache[id] = m + } + a.pathCacheLock.Unlock() + if count := a.ctr.Decrement(m); count > 0 { + return nil + } + + err := a.unmount(m) + if err != nil { + logrus.Debugf("Failed to unmount %s aufs: %v", id, err) + } + return err +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For AUFS, it queries the mountpoint for this ID. +func (a *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + a.locker.Lock(id) + defer a.locker.Unlock(id) + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + if !exists { + m = a.getMountpoint(id) + a.pathCache[id] = m + } + a.pathCacheLock.Unlock() + return directory.Usage(m) +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (a *Driver) isParent(id, parent string) bool { + parents, _ := getParentIDs(a.rootPath(), id) + if parent == "" && len(parents) > 0 { + return false + } + return !(len(parents) > 0 && parent != parents[0]) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (a *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel) + } + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + + // AUFS doesn't need the parent layer to produce a diff. + return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + }) +} + +type fileGetNilCloser struct { + storage.FileGetter +} + +func (f fileGetNilCloser) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p := path.Join(a.rootPath(), "diff", id) + return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil +} + +func (a *Driver) applyDiff(id string, idMappings *idtools.IDMappings, diff io.Reader) error { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + }) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (a *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) + } + // AUFS doesn't need the parent layer to calculate the diff size. + return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. 
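+// If parent is not the direct parent of id, the call is delegated to the
+// naive diff driver, which mounts both layers and compares them; in the
+// common case the tar stream is simply extracted into the layer's diff
+// directory.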
+func (a *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.ApplyDiff(id, parent, options) + } + + // AUFS doesn't need the parent id to apply the diff if it is the direct parent. + if err = a.applyDiff(id, options.Mappings, options.Diff); err != nil { + return + } + + return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel) + } + + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. + layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIDs(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id string, target string, layers []string, options graphdriver.MountOpts) error { + a.Lock() + defer a.Unlock() + + // If the id is mounted or we get an error return + if mounted, err := a.mounted(target); err != nil || mounted { + return err + } + + rw := a.getDiffPath(id) + + if err := a.aufsMount(layers, rw, target, options); err != nil { + return fmt.Errorf("error creating aufs mount to %s: %v", target, err) + } + return nil +} + +func (a *Driver) unmount(mountPath string) error { + a.Lock() + defer a.Unlock() + + if mounted, err := a.mounted(mountPath); err != nil || !mounted { + return err + } + if err := Unmount(mountPath); err != nil { + return err + } + return nil +} + +func (a *Driver) mounted(mountpoint string) (bool, error) { + return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) +} + +// Cleanup aufs and unmount all mountpoints +func (a *Driver) Cleanup() error { + var dirs []string + if err := filepath.WalkDir(a.mntPath(), func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + return nil + } + dirs = append(dirs, path) + return nil + }); err != nil { + return err + } + + for _, m := range dirs { + if err := a.unmount(m); err != nil { + logrus.Debugf("aufs error unmounting %s: %s", m, err) + } + } + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + // Mount options are clipped to page size(4096 bytes). If there are more + // layers then these are remounted individually using append. 
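+	// For example, the first mount's data string looks roughly like
+	//   br:<root>/diff/<id>=rw:<root>/diff/<parent1>=ro+wh:...,dio,xino=/dev/shm/aufs.xino
+	// and each layer that did not fit in the page is added afterwards via
+	// an "append:<root>/diff/<parentN>=ro+wh" remount with MS_REMOUNT.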
+ + offset := 54 + if useDirperm() { + offset += len(",dirperm1") + } + b := make([]byte, unix.Getpagesize()-len(options.MountLabel)-offset) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + index := 0 + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } + + opts := "dio,xino=/dev/shm/aufs.xino" + mountOptions := a.mountOptions + if len(options.Options) > 0 { + mountOptions = strings.Join(options.Options, ",") + } + if mountOptions != "" { + opts += fmt.Sprintf(",%s", mountOptions) + } + + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), options.MountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), options.MountLabel) + if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil { + return + } + } + + return +} + +// useDirperm checks dirperm1 mount option can be used with the current +// version of aufs. +func useDirperm() bool { + enableDirpermLock.Do(func() { + base, err := ioutil.TempDir("", "storage-aufs-base") + if err != nil { + logrus.Errorf("Checking dirperm1: %v", err) + return + } + defer os.RemoveAll(base) + + union, err := ioutil.TempDir("", "storage-aufs-union") + if err != nil { + logrus.Errorf("Checking dirperm1: %v", err) + return + } + defer os.RemoveAll(union) + + opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) + if err := mount("none", union, "aufs", 0, opts); err != nil { + return + } + enableDirperm = true + if err := Unmount(union); err != nil { + logrus.Errorf("Checking dirperm1: failed to unmount %v", err) + } + }) + return enableDirperm +} + +// UpdateLayerIDMap updates ID mappings in a layer from matching the ones +// specified by toContainer to those specified by toHost. +func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { + return fmt.Errorf("aufs doesn't support changing ID mappings") +} + +// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS +func (a *Driver) SupportsShifting() bool { + return false +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go new file mode 100644 index 00000000000..d2325fc46cd --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go @@ -0,0 +1,64 @@ +// +build linux + +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return all the directories +func loadIds(root string) ([]string, error) { + dirs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. 
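+//
+// For example, a layer C created on top of B, which was created on top of
+// A, has a layers file containing the two lines "B" and "A", ordered from
+// the immediate parent down to the base layer.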
+func getParentIDs(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} + +func (a *Driver) getMountpoint(id string) string { + return path.Join(a.mntPath(), id) +} + +func (a *Driver) mntPath() string { + return path.Join(a.rootPath(), "mnt") +} + +func (a *Driver) getDiffPath(id string) string { + return path.Join(a.diffPath(), id) +} + +func (a *Driver) diffPath() string { + return path.Join(a.rootPath(), "diff") +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go new file mode 100644 index 00000000000..100e7537a9c --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount.go @@ -0,0 +1,21 @@ +// +build linux + +package aufs + +import ( + "os/exec" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// Unmount the target specified. +func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) + } + if err := unix.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go new file mode 100644 index 00000000000..937104ba3fd --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go @@ -0,0 +1,7 @@ +package aufs + +import "golang.org/x/sys/unix" + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return unix.Mount(source, target, fstype, flags, data) +} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go new file mode 100644 index 00000000000..d030b066378 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package aufs + +import "errors" + +// MsRemount declared to specify a non-linux system mount. 
+const MsRemount = 0
+
+func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+	return errors.New("mount is not implemented on this platform")
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
new file mode 100644
index 00000000000..339aa0d3809
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -0,0 +1,684 @@
+//go:build linux && cgo
+// +build linux,cgo
+
+package btrfs
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <btrfs/ioctl.h>
+#include <btrfs/ctree.h>
+
+static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
+	snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
+}
+*/
+import "C"
+
+import (
+	"fmt"
+	"io/fs"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"unsafe"
+
+	graphdriver "github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/directory"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/containers/storage/pkg/system"
+	"github.com/docker/go-units"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+const defaultPerms = os.FileMode(0555)
+
+func init() {
+	graphdriver.Register("btrfs", Init)
+}
+
+type btrfsOptions struct {
+	minSpace uint64
+	size     uint64
+}
+
+// Init returns a new BTRFS driver.
+// An error is returned if BTRFS is not supported.
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
+
+	fsMagic, err := graphdriver.GetFSMagic(home)
+	if err != nil {
+		return nil, err
+	}
+
+	if fsMagic != graphdriver.FsMagicBtrfs {
+		return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "%q is not on a btrfs filesystem", home)
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+	if err != nil {
+		return nil, err
+	}
+	if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
+		return nil, err
+	}
+
+	if err := mount.MakePrivate(home); err != nil {
+		return nil, err
+	}
+
+	opt, userDiskQuota, err := parseOptions(options.DriverOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	driver := &Driver{
+		home:    home,
+		uidMaps: options.UIDMaps,
+		gidMaps: options.GIDMaps,
+		options: opt,
+	}
+
+	if userDiskQuota {
+		if err := driver.enableQuota(); err != nil {
+			return nil, err
+		}
+	}
+
+	return graphdriver.NewNaiveDiffDriver(driver, graphdriver.NewNaiveLayerIDMapUpdater(driver)), nil
+}
+
+func parseOptions(opt []string) (btrfsOptions, bool, error) {
+	var options btrfsOptions
+	userDiskQuota := false
+	for _, option := range opt {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return options, userDiskQuota, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "btrfs.min_space":
+			minSpace, err := units.RAMInBytes(val)
+			if err != nil {
+				return options, userDiskQuota, err
+			}
+			userDiskQuota = true
+			options.minSpace = uint64(minSpace)
+		case "btrfs.mountopt":
+			return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options")
+		default:
+			return options, userDiskQuota, fmt.Errorf("Unknown option %s", key)
+		}
+	}
+	return options, userDiskQuota, nil
+}
+
+// Driver contains information about the filesystem mounted.
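+// Layers live as btrfs subvolumes under <home>/subvolumes, and per-layer
+// quota sizes, when set, are persisted as plain files under <home>/quotas.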
+type Driver struct { + //root of the file system + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + options btrfsOptions + quotaEnabled bool + once sync.Once +} + +// String prints the name of the driver (btrfs). +func (d *Driver) String() string { + return "btrfs" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Build Version" and "Library Version" of the btrfs libraries used. +// Version information can be used to check compatibility with your kernel. +func (d *Driver) Status() [][2]string { + status := [][2]string{} + if bv := btrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := btrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status +} + +// Metadata returns empty metadata for this driver. +func (d *Driver) Metadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup unmounts the home directory. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + + var cs = C.CString(name) + C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) + C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func isSubvolume(p string) (bool, error) { + var bufStat unix.Stat_t + if err := unix.Lstat(p, &bufStat); err != nil { + return false, err + } + + // return true if it is a btrfs subvolume + return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil +} + +func subvolDelete(dirpath, name string, quotaEnabled bool) error { + dir, err := openDir(dirpath) + if err != nil { + return err + } + defer closeDir(dir) + fullPath := path.Join(dirpath, name) + + var args C.struct_btrfs_ioctl_vol_args + + // walk the btrfs subvolumes + walkSubvolumes := func(p string, d fs.DirEntry, err error) error { + if err != nil { + if os.IsNotExist(err) && p != fullPath { + // missing most likely because the path was a subvolume that got removed in the previous iteration + // since it's gone anyway, we don't care + return nil + } + return fmt.Errorf("error walking subvolumes: %v", err) + } + // we 
want to check children only so skip itself + // it will be removed after the filepath walk anyways + if d.IsDir() && p != fullPath { + sv, err := isSubvolume(p) + if err != nil { + return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + } + if sv { + if err := subvolDelete(path.Dir(p), d.Name(), quotaEnabled); err != nil { + return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) + } + } + } + return nil + } + if err := filepath.WalkDir(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + } + + if quotaEnabled { + if qgroupid, err := subvolLookupQgroup(fullPath); err == nil { + var args C.struct_btrfs_ioctl_qgroup_create_args + args.qgroupid = C.__u64(qgroupid) + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error()) + } + } else { + logrus.Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error()) + } + } + + // all subvolumes have been removed + // now remove the one originally passed in + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + } + return nil +} + +func (d *Driver) updateQuotaStatus() { + d.once.Do(func() { + if !d.quotaEnabled { + // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed + if err := qgroupStatus(d.home); err != nil { + // quota is still not enabled + return + } + d.quotaEnabled = true + } + }) +} + +func (d *Driver) enableQuota() error { + d.updateQuotaStatus() + + if d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_ENABLE + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) + } + + d.quotaEnabled = true + + return nil +} + +func (d *Driver) subvolRescanQuota() error { + d.updateQuotaStatus() + + if !d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_rescan_args + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolLimitQgroup(path string, size uint64) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_qgroup_limit_args + args.lim.max_referenced = C.__u64(size) + args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) + } + + return nil +} + +// qgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path +// with search key of 
BTRFS_QGROUP_STATUS_KEY. +// In case qgroup is enabled, the returned key type will match BTRFS_QGROUP_STATUS_KEY. +// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035 +func qgroupStatus(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_search_args + args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID + args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_objectid = C.__u64(math.MaxUint64) + args.key.max_offset = C.__u64(math.MaxUint64) + args.key.max_transid = C.__u64(math.MaxUint64) + args.key.nr_items = 4096 + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error()) + } + sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) + if sh._type != C.BTRFS_QGROUP_STATUS_KEY { + return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type) + } + return nil +} + +func subvolLookupQgroup(path string) (uint64, error) { + dir, err := openDir(path) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_ino_lookup_args + args.objectid = C.BTRFS_FIRST_FREE_OBJECTID + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error()) + } + if args.treeid == 0 { + return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir) + } + + return uint64(args.treeid), nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirID(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +func (d *Driver) quotasDir() string { + return path.Join(d.home, "quotas") +} + +func (d *Driver) quotasDirID(id string) string { + return path.Join(d.quotasDir(), id) +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + return d.Create(id, template, opts) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create the filesystem with given id. 
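+// A layer without a parent becomes a fresh subvolume; a layer with a
+// parent is created as a btrfs snapshot of the parent's subvolume, i.e. a
+// cheap copy-on-write clone. An optional "size" storage option is applied
+// as a qgroup limit and recorded under the quotas directory.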
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + quotas := path.Join(d.home, "quotas") + subvolumes := path.Join(d.home, "subvolumes") + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + if err := os.Chmod(path.Join(subvolumes, id), defaultPerms); err != nil { + return err + } + } else { + parentDir := d.subvolumesDirID(parent) + st, err := os.Stat(parentDir) + if err != nil { + return err + } + if !st.IsDir() { + return fmt.Errorf("%s: not a directory", parentDir) + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if _, ok := storageOpt["size"]; ok { + driver := &Driver{} + if err := d.parseStorageOpt(storageOpt, driver); err != nil { + return err + } + + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { + return err + } + if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { + return err + } + } + + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + + mountLabel := "" + if opts != nil { + mountLabel = opts.MountLabel + } + + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) +} + +// Parse btrfs storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to change the subvolume disk quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +// Set btrfs storage size +func (d *Driver) setStorageSize(dir string, driver *Driver) error { + if driver.options.size <= 0 { + return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) + } + if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { + return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) + } + + if err := d.enableQuota(); err != nil { + return err + } + + if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + return err + } + + return nil +} + +// Remove the filesystem with given id. +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirID(id) + if _, err := os.Stat(dir); err != nil { + return err + } + quotasDir := d.quotasDirID(id) + if _, err := os.Stat(quotasDir); err == nil { + if err := os.Remove(quotasDir); err != nil { + return err + } + } else if !os.IsNotExist(err) { + return err + } + + // Call updateQuotaStatus() to invoke status update + d.updateQuotaStatus() + + if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil { + if d.quotaEnabled { + return err + } + // If quota is not enabled, fallback to rmdir syscall to delete subvolumes. 
+		// This would allow an unprivileged user to delete their owned subvolumes
+		// in kernel >= 4.18 without the user_subvol_rm_allowed mount option.
+	}
+	if err := system.EnsureRemoveAll(dir); err != nil {
+		return err
+	}
+	if err := d.subvolRescanQuota(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Get the requested filesystem id.
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
+	dir := d.subvolumesDirID(id)
+	st, err := os.Stat(dir)
+	if err != nil {
+		return "", err
+	}
+	switch len(options.Options) {
+	case 0:
+	case 1:
+		if options.Options[0] == "ro" {
+			// ignore "ro" option
+			break
+		}
+		fallthrough
+	default:
+		return "", fmt.Errorf("btrfs driver does not support mount options")
+	}
+
+	if !st.IsDir() {
+		return "", fmt.Errorf("%s: not a directory", dir)
+	}
+
+	if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
+		if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
+			if err := d.enableQuota(); err != nil {
+				return "", err
+			}
+			if err := subvolLimitQgroup(dir, size); err != nil {
+				return "", err
+			}
+		}
+	}
+
+	return dir, nil
+}
+
+// Put is not implemented for BTRFS as there is no cleanup required for the id.
+func (d *Driver) Put(id string) error {
+	// Get() creates no runtime resources (like e.g. mounts)
+	// so this doesn't need to do anything.
+	return nil
+}
+
+// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID.
+// For BTRFS, it queries the subvolumes path for this ID.
+func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
+	return directory.Usage(d.subvolumesDirID(id))
+}
+
+// Exists checks if the id exists in the filesystem.
+func (d *Driver) Exists(id string) bool {
+	dir := d.subvolumesDirID(id)
+	_, err := os.Stat(dir)
+	return err == nil
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
new file mode 100644
index 00000000000..f07088887a1
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux !cgo
+
+package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version.go b/vendor/github.com/containers/storage/drivers/btrfs/version.go
new file mode 100644
index 00000000000..edd8bdab85e
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version.go
@@ -0,0 +1,26 @@
+// +build linux,!btrfs_noversion,cgo
+
+package btrfs
+
+/*
+#include <btrfs/version.h>
+
+// around version 3.16, they did not define lib version yet
+#ifndef BTRFS_LIB_VERSION
+#define BTRFS_LIB_VERSION -1
+#endif
+
+// upstream had removed it, but now it will be coming back
+#ifndef BTRFS_BUILD_VERSION
+#define BTRFS_BUILD_VERSION "-"
+#endif
+*/
+import "C"
+
+func btrfsBuildVersion() string {
+	return string(C.BTRFS_BUILD_VERSION)
+}
+
+func btrfsLibVersion() int {
+	return int(C.BTRFS_LIB_VERSION)
+}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
new file mode 100644
index 00000000000..905e834e354
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
@@ -0,0 +1,14 @@
+// +build !linux btrfs_noversion !cgo
+
+package btrfs
+
+// TODO(vbatts) remove this work-around
once supported linux distros are on +// btrfs utilities of >= 3.16.1 + +func btrfsBuildVersion() string { + return "-" +} + +func btrfsLibVersion() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go new file mode 100644 index 00000000000..2db6764c91b --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -0,0 +1,136 @@ +package graphdriver + +import ( + "bytes" + "fmt" + "os" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/reexec" + "github.com/opencontainers/selinux/pkg/pwalk" +) + +const ( + chownByMapsCmd = "storage-chown-by-maps" +) + +func init() { + reexec.Register(chownByMapsCmd, chownByMapsMain) +} + +func chownByMapsMain() { + if len(os.Args) < 2 { + fmt.Fprintf(os.Stderr, "requires mapping configuration on stdin and directory path") + os.Exit(1) + } + // Read and decode our configuration. + discreteMaps := [4][]idtools.IDMap{} + config := bytes.Buffer{} + if _, err := config.ReadFrom(os.Stdin); err != nil { + fmt.Fprintf(os.Stderr, "error reading configuration: %v", err) + os.Exit(1) + } + if err := json.Unmarshal(config.Bytes(), &discreteMaps); err != nil { + fmt.Fprintf(os.Stderr, "error decoding configuration: %v", err) + os.Exit(1) + } + // Try to chroot. This may not be possible, and on some systems that + // means we just Chdir() to the directory, so from here on we should be + // using relative paths. + if err := chrootOrChdir(os.Args[1]); err != nil { + fmt.Fprintf(os.Stderr, "error chrooting to %q: %v", os.Args[1], err) + os.Exit(1) + } + // Build the mapping objects. + toContainer := idtools.NewIDMappingsFromMaps(discreteMaps[0], discreteMaps[1]) + if len(toContainer.UIDs()) == 0 && len(toContainer.GIDs()) == 0 { + toContainer = nil + } + toHost := idtools.NewIDMappingsFromMaps(discreteMaps[2], discreteMaps[3]) + if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 { + toHost = nil + } + + chowner := newLChowner() + + chown := func(path string, info os.FileInfo, _ error) error { + if path == "." { + return nil + } + return chowner.LChown(path, info, toHost, toContainer) + } + if err := pwalk.Walk(".", chown); err != nil { + fmt.Fprintf(os.Stderr, "error during chown: %v", err) + os.Exit(1) + } + os.Exit(0) +} + +// ChownPathByMaps walks the filesystem tree, changing the ownership +// information using the toContainer and toHost mappings, using them to replace +// on-disk owner UIDs and GIDs which are "host" values in the first map with +// UIDs and GIDs for "host" values from the second map which correspond to the +// same "container" IDs. 
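+// The walk is performed by a re-execed helper process that first chroots
+// (or, where chroot is unavailable, chdirs) into path, so symlinks inside
+// the tree cannot redirect ownership changes outside of it.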
+func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error { + if toContainer == nil { + toContainer = &idtools.IDMappings{} + } + if toHost == nil { + toHost = &idtools.IDMappings{} + } + + config, err := json.Marshal([4][]idtools.IDMap{toContainer.UIDs(), toContainer.GIDs(), toHost.UIDs(), toHost.GIDs()}) + if err != nil { + return err + } + cmd := reexec.Command(chownByMapsCmd, path) + cmd.Stdin = bytes.NewReader(config) + output, err := cmd.CombinedOutput() + if len(output) > 0 && err != nil { + return fmt.Errorf("%v: %s", err, string(output)) + } + if err != nil { + return err + } + if len(output) > 0 { + return fmt.Errorf("%s", string(output)) + } + + return nil +} + +type naiveLayerIDMapUpdater struct { + ProtoDriver +} + +// NewNaiveLayerIDMapUpdater wraps the ProtoDriver in a LayerIDMapUpdater that +// uses ChownPathByMaps to update the ownerships in a layer's filesystem tree. +func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater { + return &naiveLayerIDMapUpdater{ProtoDriver: driver} +} + +// UpdateLayerIDMap walks the layer's filesystem tree, changing the ownership +// information using the toContainer and toHost mappings, using them to replace +// on-disk owner UIDs and GIDs which are "host" values in the first map with +// UIDs and GIDs for "host" values from the second map which correspond to the +// same "container" IDs. +func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { + driver := n.ProtoDriver + options := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return err + } + defer func() { + driver.Put(id) + }() + + return ChownPathByMaps(layerFs, toContainer, toHost) +} + +// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS +func (n *naiveLayerIDMapUpdater) SupportsShifting() bool { + return false +} diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go new file mode 100644 index 00000000000..c598b936d64 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -0,0 +1,109 @@ +//go:build !windows +// +build !windows + +package graphdriver + +import ( + "errors" + "fmt" + "os" + "sync" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" +) + +type inode struct { + Dev uint64 + Ino uint64 +} + +type platformChowner struct { + mutex sync.Mutex + inodes map[inode]bool +} + +func newLChowner() *platformChowner { + return &platformChowner{ + inodes: make(map[inode]bool), + } +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + st, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + + i := inode{ + Dev: uint64(st.Dev), + Ino: uint64(st.Ino), + } + c.mutex.Lock() + _, found := c.inodes[i] + if !found { + c.inodes[i] = true + } + c.mutex.Unlock() + + if found { + return nil + } + + // Map an on-disk UID/GID pair from host to container + // using the first map, then back to the host using the + // second map. Skip that first step if they're 0, to + // compensate for cases where a parent layer should + // have had a mapped value, but didn't. 
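+	// For example, with a toContainer mapping of host 100000 -> container 0
+	// and a toHost mapping of container 0 -> host 200000, a file owned by
+	// host UID 100005 maps to container UID 5 and is re-owned to 200005.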
+ uid, gid := int(st.Uid), int(st.Gid) + if toContainer != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedUID, mappedGID, err := toContainer.ToContainer(pair) + if err != nil { + if (uid != 0) || (gid != 0) { + return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err) + } + mappedUID, mappedGID = uid, gid + } + uid, gid = mappedUID, mappedGID + } + if toHost != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedPair, err := toHost.ToHostOverflow(pair) + if err != nil { + return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) + } + uid, gid = mappedPair.UID, mappedPair.GID + } + if uid != int(st.Uid) || gid != int(st.Gid) { + cap, err := system.Lgetxattr(path, "security.capability") + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform { + return fmt.Errorf("%s: %v", os.Args[0], err) + } + + // Make the change. + if err := system.Lchown(path, uid, gid); err != nil { + return fmt.Errorf("%s: %v", os.Args[0], err) + } + // Restore the SUID and SGID bits if they were originally set. + if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 { + if err := system.Chmod(path, info.Mode()); err != nil { + return fmt.Errorf("%s: %v", os.Args[0], err) + } + } + if cap != nil { + if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil { + return fmt.Errorf("%s: %v", os.Args[0], err) + } + } + + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go new file mode 100644 index 00000000000..1845a4e086c --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_windows.go @@ -0,0 +1,22 @@ +//go:build windows +// +build windows + +package graphdriver + +import ( + "os" + "syscall" + + "github.com/containers/storage/pkg/idtools" +) + +type platformChowner struct { +} + +func newLChowner() *platformChowner { + return &platformChowner{} +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + return &os.PathError{"lchown", path, syscall.EWINDOWS} +} diff --git a/vendor/github.com/containers/storage/drivers/chroot_unix.go b/vendor/github.com/containers/storage/drivers/chroot_unix.go new file mode 100644 index 00000000000..c8c4905bfee --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chroot_unix.go @@ -0,0 +1,21 @@ +// +build linux darwin freebsd solaris + +package graphdriver + +import ( + "fmt" + "os" + "syscall" +) + +// chrootOrChdir() is either a chdir() to the specified path, or a chroot() to the +// specified path followed by chdir() to the new root directory +func chrootOrChdir(path string) error { + if err := syscall.Chroot(path); err != nil { + return fmt.Errorf("error chrooting to %q: %v", path, err) + } + if err := syscall.Chdir(string(os.PathSeparator)); err != nil { + return fmt.Errorf("error changing to %q: %v", path, err) + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/chroot_windows.go b/vendor/github.com/containers/storage/drivers/chroot_windows.go new file mode 100644 index 00000000000..f4dc22a9615 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chroot_windows.go @@ -0,0 +1,15 @@ +package graphdriver + +import ( + "fmt" + "syscall" +) + +// chrootOrChdir() is either a chdir() to the specified path, or a chroot() to the 
+// specified path followed by chdir() to the new root directory
+func chrootOrChdir(path string) error {
+	if err := syscall.Chdir(path); err != nil {
+		return fmt.Errorf("error changing to %q: %v", path, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_linux.go b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
new file mode 100644
index 00000000000..7773844f916
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/copy/copy_linux.go
@@ -0,0 +1,306 @@
+// +build cgo
+
+package copy
+
+/*
+#include <linux/fs.h>
+
+#ifndef FICLONE
+#define FICLONE _IOW(0x94, 9, int)
+#endif
+*/
+import "C"
+import (
+	"container/list"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"path/filepath"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/pools"
+	"github.com/containers/storage/pkg/system"
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/opencontainers/runc/libcontainer/userns"
+	"golang.org/x/sys/unix"
+)
+
+// Mode indicates whether to use hardlink or copy content
+type Mode int
+
+const (
+	// Content creates a new file, and copies the content of the file
+	Content Mode = iota
+	// Hardlink creates a new hardlink to the existing file
+	Hardlink
+)
+
+// CopyRegularToFile copies the content of a file to another
+func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: golint
+	srcFile, err := os.Open(srcPath)
+	if err != nil {
+		return err
+	}
+	defer srcFile.Close()
+
+	if *copyWithFileClone {
+		_, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
+		if err == nil {
+			return nil
+		}
+
+		*copyWithFileClone = false
+		if err == unix.EXDEV {
+			*copyWithFileRange = false
+		}
+	}
+	if *copyWithFileRange {
+		err = doCopyWithFileRange(srcFile, dstFile, fileinfo)
+		// Trying the file_clone may not have caught the exdev case
+		// as the ioctl may not have been available (therefore EINVAL)
+		if err == unix.EXDEV || err == unix.ENOSYS {
+			*copyWithFileRange = false
+		} else {
+			return err
+		}
+	}
+	return legacyCopy(srcFile, dstFile)
+}
+
+// CopyRegular copies the content of a file to another
+func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { // nolint: golint
+	// If the destination file already exists, we shouldn't blow it away
+	dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode())
+	if err != nil {
+		return err
+	}
+	defer dstFile.Close()
+
+	return CopyRegularToFile(srcPath, dstFile, fileinfo, copyWithFileRange, copyWithFileClone)
+}
+
+func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error {
+	amountLeftToCopy := fileinfo.Size()
+
+	for amountLeftToCopy > 0 {
+		n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0)
+		if err != nil {
+			return err
+		}
+
+		amountLeftToCopy = amountLeftToCopy - int64(n)
+	}
+
+	return nil
+}
+
+func legacyCopy(srcFile io.Reader, dstFile io.Writer) error {
+	_, err := pools.Copy(dstFile, srcFile)
+
+	return err
+}
+
+func copyXattr(srcPath, dstPath, attr string) error {
+	data, err := system.Lgetxattr(srcPath, attr)
+	if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
+		return err
+	}
+	if data != nil {
+		if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type fileID struct {
+	dev uint64
+	ino uint64
+}
+
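+// dirMtimeInfo remembers a destination directory and its source timestamps;
+// directory mtimes are restored only after the walk completes, because
+// copying entries into a directory would otherwise bump its mtime again.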
+type dirMtimeInfo struct {
+	dstPath *string
+	stat    *syscall.Stat_t
+}
+
+// DirCopy copies or hardlinks the contents of one directory to another,
+// properly handling xattrs, and soft links
+//
+// Copying xattrs can be opted out of by passing false for copyXattrs.
+func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
+	copyWithFileRange := true
+	copyWithFileClone := true
+
+	// This is a map of source file inodes to dst file paths
+	copiedFiles := make(map[fileID]string)
+
+	dirsToSetMtimes := list.New()
+	err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		relPath, err := filepath.Rel(srcDir, srcPath)
+		if err != nil {
+			return err
+		}
+
+		dstPath := filepath.Join(dstDir, relPath)
+		stat, ok := f.Sys().(*syscall.Stat_t)
+		if !ok {
+			return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
+		}
+
+		isHardlink := false
+
+		switch mode := f.Mode(); {
+		case mode.IsRegular():
+			id := fileID{dev: uint64(stat.Dev), ino: stat.Ino}
+			if copyMode == Hardlink {
+				isHardlink = true
+				if err2 := os.Link(srcPath, dstPath); err2 != nil {
+					return err2
+				}
+			} else if hardLinkDstPath, ok := copiedFiles[id]; ok {
+				if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil {
+					return err2
+				}
+			} else {
+				if err2 := CopyRegular(srcPath, dstPath, f, &copyWithFileRange, &copyWithFileClone); err2 != nil {
+					return err2
+				}
+				copiedFiles[id] = dstPath
+			}
+
+		case mode.IsDir():
+			if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
+				return err
+			}
+
+		case mode&os.ModeSymlink != 0:
+			link, err := os.Readlink(srcPath)
+			if err != nil {
+				return err
+			}
+
+			if err := os.Symlink(link, dstPath); err != nil {
+				return err
+			}
+
+		case mode&os.ModeNamedPipe != 0:
+			if err := unix.Mkfifo(dstPath, stat.Mode); err != nil {
+				return err
+			}
+
+		case mode&os.ModeSocket != 0:
+			s, err := net.Listen("unix", dstPath)
+			if err != nil {
+				return err
+			}
+			s.Close()
+
+		case mode&os.ModeDevice != 0:
+			if userns.RunningInUserNS() {
+				// cannot create a device if running in user namespace
+				return nil
+			}
+			if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
+				return err
+			}
+
+		default:
+			return fmt.Errorf("unknown file type with mode %v for %s", mode, srcPath)
+		}
+
+		// Everything below is copying metadata from src to dst. All this metadata
+		// already shares an inode for hardlinks.
+		if isHardlink {
+			return nil
+		}
+
+		if err := idtools.SafeLchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
+			return err
+		}
+
+		if copyXattrs {
+			if err := doCopyXattrs(srcPath, dstPath); err != nil {
+				return err
+			}
+		}
+
+		isSymlink := f.Mode()&os.ModeSymlink != 0
+
+		// There is no LChmod, so ignore mode for symlink.
Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + // nolint: unconvert + if f.IsDir() { + dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat}) + } else if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() { + mtimeInfo := e.Value.(*dirMtimeInfo) + ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim} + if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil { + return err + } + } + + return nil +} + +func doCopyXattrs(srcPath, dstPath string) error { + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + xattrs, err := system.Llistxattr(srcPath) + if err != nil && !errors.Is(err, unix.EOPNOTSUPP) { + return err + } + + for _, key := range xattrs { + if strings.HasPrefix(key, "user.") { + if err := copyXattr(srcPath, dstPath, key); err != nil { + return err + } + } + } + + if unshare.IsRootless() { + return nil + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. 
+ return copyXattr(srcPath, dstPath, "trusted.overlay.opaque") +} diff --git a/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go new file mode 100644 index 00000000000..e97523c3578 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go @@ -0,0 +1,40 @@ +// +build !linux !cgo + +package copy + +import ( + "io" + "os" + + "github.com/containers/storage/pkg/chrootarchive" +) + +// Mode indicates whether to use hardlink or copy content +type Mode int + +const ( + // Content creates a new file, and copies the content of the file + Content Mode = iota +) + +// DirCopy copies or hardlinks the contents of one directory to another, +// properly handling soft links +func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error { + return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir) +} + +// CopyRegularToFile copies the content of a file to another +func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { + f, err := os.Open(srcPath) + if err != nil { + return err + } + defer f.Close() + _, err = io.Copy(dstFile, f) + return err +} + +// CopyRegular copies the content of a file to another +func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { + return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath) +} diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/containers/storage/drivers/counter.go new file mode 100644 index 00000000000..3fc45495b2e --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/counter.go @@ -0,0 +1,63 @@ +package graphdriver + +import "sync" + +type minfo struct { + check bool + count int +} + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]*minfo + mu sync.Mutex + checker Checker +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } +} + +// Increment increases the ref count for the given id and returns the current count +func (c *RefCounter) Increment(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count++ + }) +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count-- + }) +} + +func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } else if !c.checker.IsMounted(path) { + // if the unmount was performed outside of this process (e.g. conmon cleanup) + //the ref counter would lose track of it. Check if it is still mounted. 
+ m.count = 0 + } + infoOp(m) + count := m.count + c.mu.Unlock() + return count +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go new file mode 100644 index 00000000000..c23097b7635 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go @@ -0,0 +1,248 @@ +// +build linux,cgo + +package devmapper + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type directLVMConfig struct { + Device string + ThinpPercent uint64 + ThinpMetaPercent uint64 + AutoExtendPercent uint64 + AutoExtendThreshold uint64 + MetaDataSize string +} + +var ( + errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified") + errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100") + errMissingSetupDevice = errors.New("must provide device path in `dm.directlvm_device` in order to configure direct-lvm") +) + +func validateLVMConfig(cfg directLVMConfig) error { + if cfg.Device == "" { + return errMissingSetupDevice + } + if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 { + return errThinpPercentMissing + } + + if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 { + return errThinpPercentTooBig + } + return nil +} + +func checkDevAvailable(dev string) error { + lvmScan, err := exec.LookPath("lvmdiskscan") + if err != nil { + logrus.Debug("could not find lvmdiskscan") + return nil + } + + out, err := exec.Command(lvmScan).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + if !bytes.Contains(out, []byte(dev)) { + return errors.Errorf("%s is not available for use with devicemapper", dev) + } + return nil +} + +func checkDevInVG(dev string) error { + pvDisplay, err := exec.LookPath("pvdisplay") + if err != nil { + logrus.Debug("could not find pvdisplay") + return nil + } + + out, err := exec.Command(pvDisplay, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out))) + for scanner.Scan() { + fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name") + if len(fields) > 1 { + // got "VG Name" line" + vg := strings.TrimSpace(fields[1]) + if len(vg) > 0 { + return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) + } + logrus.Error(fields) + break + } + } + return nil +} + +func checkDevHasFS(dev string) error { + blkid, err := exec.LookPath("blkid") + if err != nil { + logrus.Debug("could not find blkid") + return nil + } + + out, err := exec.Command(blkid, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + fields := bytes.Fields(out) + for _, f := range fields { + kv := bytes.Split(f, []byte{'='}) + if bytes.Equal(kv[0], []byte("TYPE")) { + v := bytes.Trim(kv[1], "\"") + if len(v) > 0 { + return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) + } + return nil + } + } + return nil +} + +func verifyBlockDevice(dev string, force bool) error { + absPath, err := filepath.Abs(dev) + if err != nil { + return 
errors.Errorf("unable to get absolute path for %s: %s", dev, err) + } + realPath, err := filepath.EvalSymlinks(absPath) + if err != nil { + return errors.Errorf("failed to canonicalise path for %s: %s", dev, err) + } + if err := checkDevAvailable(absPath); err != nil { + logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath) + if err := checkDevAvailable(realPath); err != nil { + return errors.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device.", absPath, realPath) + } + } + if err := checkDevInVG(realPath); err != nil { + return err + } + + if force { + return nil + } + + if err := checkDevHasFS(realPath); err != nil { + return err + } + return nil +} + +func readLVMConfig(root string) (directLVMConfig, error) { + var cfg directLVMConfig + + p := filepath.Join(root, "setup-config.json") + b, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + return cfg, nil + } + return cfg, errors.Wrap(err, "error reading existing setup config") + } + + // check if this is just an empty file, no need to produce a json error later if so + if len(b) == 0 { + return cfg, nil + } + + err = json.Unmarshal(b, &cfg) + return cfg, errors.Wrap(err, "error unmarshaling previous device setup config") +} + +func writeLVMConfig(root string, cfg directLVMConfig) error { + p := filepath.Join(root, "setup-config.json") + b, err := json.Marshal(cfg) + if err != nil { + return errors.Wrap(err, "error marshalling direct lvm config") + } + err = ioutil.WriteFile(p, b, 0600) + return errors.Wrap(err, "error writing direct lvm config to file") +} + +func setupDirectLVM(cfg directLVMConfig) error { + lvmProfileDir := "/etc/lvm/profile" + binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"} + + for _, bin := range binaries { + if _, err := exec.LookPath(bin); err != nil { + return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm") + } + } + + err := os.MkdirAll(lvmProfileDir, 0755) + if err != nil { + return errors.Wrap(err, "error creating lvm profile directory") + } + + if cfg.AutoExtendPercent == 0 { + cfg.AutoExtendPercent = 20 + } + + if cfg.AutoExtendThreshold == 0 { + cfg.AutoExtendThreshold = 80 + } + + if cfg.ThinpPercent == 0 { + cfg.ThinpPercent = 95 + } + if cfg.ThinpMetaPercent == 0 { + cfg.ThinpMetaPercent = 1 + } + if cfg.MetaDataSize == "" { + cfg.MetaDataSize = "128k" + } + + out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + profile := 
fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) + err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600) + if err != nil { + return errors.Wrap(err, "error writing storage thinp autoextend profile") + } + + out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput() + return errors.Wrap(err, string(out)) +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go new file mode 100644 index 00000000000..e604b7e3186 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -0,0 +1,2866 @@ +//go:build linux && cgo +// +build linux,cgo + +package devmapper + +import ( + "bufio" + "fmt" + "io" + "io/fs" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + "sync" + "time" + + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/devicemapper" + "github.com/containers/storage/pkg/dmesg" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/loopback" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/parsers/kernel" + units "github.com/docker/go-units" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +var ( + defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + defaultUdevSyncOverride = false + maxDeviceID = 0xffffff // 24 bit, pool limit + deviceIDMapSz = (maxDeviceID + 1) / 8 + driverDeferredRemovalSupport = false + enableDeferredRemoval = false + enableDeferredDeletion = false + userBaseSize = false + defaultMinFreeSpacePercent uint32 = 10 + lvmSetupConfigForce bool +) + +const ( + deviceSetMetaFile = "deviceset-metadata" + transactionMetaFile = "transaction-metadata" + xfs = "xfs" + ext4 = "ext4" + base = "base" +) + +type transaction struct { + OpenTransactionID uint64 `json:"open_transaction_id"` + DeviceIDHash string `json:"device_hash"` + DeviceID int `json:"device_id"` +} + +type devInfo struct { + Hash string `json:"-"` + DeviceID int `json:"device_id"` + Size uint64 `json:"size"` + TransactionID uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + Deleted bool `json:"deleted"` + devices *DeviceSet + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. + // + // WARNING: In order to avoid AB-BA deadlocks when releasing + // the global lock while holding the per-device locks all + // device locks must be acquired *before* the device lock, and + // multiple device locks should be acquired parent before child. 
+ lock sync.Mutex +} + +type metaData struct { + Devices map[string]*devInfo `json:"Devices"` +} + +// DeviceSet holds information about list of devices +type DeviceSet struct { + metaData `json:"-"` + sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper + root string + devicePrefix string + TransactionID uint64 `json:"-"` + NextDeviceID int `json:"next_device_id"` + deviceIDMap []byte + + // Options + dataLoopbackSize int64 + metaDataSize string + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string // block or loop dev + dataLoopFile string // loopback file, if used + metadataDevice string // block or loop dev + metadataLoopFile string // loopback file, if used + doBlkDiscard bool + thinpBlockSize uint32 + thinPoolDevice string + transaction `json:"-"` + overrideUdevSyncCheck bool + deferredRemove bool // use deferred removal + deferredDelete bool // use deferred deletion + BaseDeviceUUID string // save UUID of base device + BaseDeviceFilesystem string // save filesystem of base device + nrDeletedDevices uint // number of deleted devices + deletionWorkerTicker *time.Ticker + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + minFreeSpacePercent uint32 //min free space percentage in thinpool + xfsNospaceRetries string // max retries when xfs receives ENOSPC + lvmSetupConfig directLVMConfig +} + +// DiskUsage contains information about disk usage and is used when reporting Status of a device. +type DiskUsage struct { + // Used bytes on the disk. + Used uint64 + // Total bytes on the disk. + Total uint64 + // Available bytes on the disk. + Available uint64 +} + +// Status returns the information about the device. +type Status struct { + // PoolName is the name of the data pool. + PoolName string + // DataFile is the actual block device for data. + DataFile string + // DataLoopback loopback file, if used. + DataLoopback string + // MetadataFile is the actual block device for metadata. + MetadataFile string + // MetadataLoopback is the loopback file, if used. + MetadataLoopback string + // Data is the disk used for data. + Data DiskUsage + // Metadata is the disk used for meta data. + Metadata DiskUsage + // BaseDeviceSize is base size of container and image + BaseDeviceSize uint64 + // BaseDeviceFS is backing filesystem. + BaseDeviceFS string + // SectorSize size of the vector. + SectorSize uint64 + // UdevSyncSupported is true if sync is supported. + UdevSyncSupported bool + // DeferredRemoveEnabled is true then the device is not unmounted. + DeferredRemoveEnabled bool + // True if deferred deletion is enabled. This is different from + // deferred removal. "removal" means that device mapper device is + // deactivated. Thin device is still in thin pool and can be activated + // again. But "deletion" means that thin device will be deleted from + // thin pool and it can't be activated again. + DeferredDeleteEnabled bool + DeferredDeletedDeviceCount uint + MinFreeSpace uint64 +} + +// Structure used to export image/container metadata in inspect. +type deviceMetadata struct { + deviceID int + deviceSize uint64 // size in bytes + deviceName string // Device name as used during activation +} + +// DevStatus returns information about device mounted containing its id, size and sector information. +type DevStatus struct { + // DeviceID is the id of the device. + DeviceID int + // Size is the size of the filesystem. 
+ Size uint64
+ // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental.
+ TransactionID uint64
+ // SizeInSectors indicates the size of the sectors allocated.
+ SizeInSectors uint64
+ // MappedSectors indicates number of mapped sectors.
+ MappedSectors uint64
+ // HighestMappedSector is the pointer to the highest mapped sector.
+ HighestMappedSector uint64
+}
+
+func getDevName(name string) string {
+ return "/dev/mapper/" + name
+}
+
+func (info *devInfo) Name() string {
+ hash := info.Hash
+ if hash == "" {
+ hash = base
+ }
+ return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash)
+}
+
+func (info *devInfo) DevName() string {
+ return getDevName(info.Name())
+}
+
+func (devices *DeviceSet) loopbackDir() string {
+ return path.Join(devices.root, "devicemapper")
+}
+
+func (devices *DeviceSet) metadataDir() string {
+ return path.Join(devices.root, "metadata")
+}
+
+func (devices *DeviceSet) metadataFile(info *devInfo) string {
+ file := info.Hash
+ if file == "" {
+ file = base
+ }
+ return path.Join(devices.metadataDir(), file)
+}
+
+func (devices *DeviceSet) transactionMetaFile() string {
+ return path.Join(devices.metadataDir(), transactionMetaFile)
+}
+
+func (devices *DeviceSet) deviceSetMetaFile() string {
+ return path.Join(devices.metadataDir(), deviceSetMetaFile)
+}
+
+func (devices *DeviceSet) oldMetadataFile() string {
+ return path.Join(devices.loopbackDir(), "json")
+}
+
+func (devices *DeviceSet) getPoolName() string {
+ if devices.thinPoolDevice == "" {
+ return devices.devicePrefix + "-pool"
+ }
+ return devices.thinPoolDevice
+}
+
+func (devices *DeviceSet) getPoolDevName() string {
+ return getDevName(devices.getPoolName())
+}
+
+func (devices *DeviceSet) hasImage(name string) bool {
+ dirname := devices.loopbackDir()
+ filename := path.Join(dirname, name)
+
+ _, err := os.Stat(filename)
+ return err == nil
+}
+
+// ensureImage creates a sparse file of <size> bytes at the path
+// <root>/devicemapper/<name>.
+// If the file already exists and new size is larger than its current size, it grows to the new size.
+// Either way it returns the full path.
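+// A minimal sketch of the same idea (path and size shown are illustrative only):
+//   f, err := os.OpenFile("/var/lib/containers/storage/devicemapper/data", os.O_RDWR|os.O_CREATE, 0600)
+//   if err == nil { defer f.Close(); _ = f.Truncate(100 << 30) } // sparse: no blocks allocated until written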
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil { + return "", err + } + + if fi, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err := file.Truncate(size); err != nil { + return "", err + } + } else { + if fi.Size() < size { + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + if err := file.Truncate(size); err != nil { + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + } + } else if fi.Size() > size { + logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionID() uint64 { + devices.OpenTransactionID = devices.TransactionID + 1 + return devices.OpenTransactionID +} + +func (devices *DeviceSet) updatePoolTransactionID() error { + if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { + return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) + } + devices.TransactionID = devices.OpenTransactionID + return nil +} + +func (devices *DeviceSet) removeMetadata(info *devInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("devmapper: Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), filePath); err != nil { + return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + return nil +} + +func (devices *DeviceSet) saveMetadata(info *devInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { + var mask byte + i := deviceID % 8 + mask = 1 << uint(i) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask +} + +func (devices *DeviceSet) markDeviceIDFree(deviceID int) { + var mask byte + i := deviceID % 8 + mask = ^(1 << uint(i)) + 
devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask +} + +func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { + var mask byte + i := deviceID % 8 + mask = (1 << uint(i)) + return (devices.deviceIDMap[deviceID/8] & mask) == 0 +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("devmapper: Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + info, err := devices.lookupDevice(hash) + return info, err +} + +// This function relies on that device hash map has been loaded in advance. +// Should be called with devices.Lock() held. +func (devices *DeviceSet) constructDeviceIDMap() { + logrus.Debug("devmapper: constructDeviceIDMap()") + defer logrus.Debug("devmapper: constructDeviceIDMap() END") + + for _, info := range devices.Devices { + devices.markDeviceIDUsed(info.DeviceID) + logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + } +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(name, ".migrated") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if strings.HasPrefix(name, ".") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if name == deviceSetMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if name == transactionMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + logrus.Debugf("devmapper: Loading data for file %s", path) + + // Include deleted devices also as cleanup delete device logic + // will go through it and see if there are any deleted devices. + if _, err := devices.lookupDevice(name); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", name, err) + } + + return nil +} + +func (devices *DeviceSet) loadDeviceFilesOnStart() error { + logrus.Debug("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") + + var scan = func(path string, d fs.DirEntry, err error) error { + if err != nil { + logrus.Debugf("devmapper: Can't walk the file %s", path) + return nil + } + + // Skip any directories + if d.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, d.Name()) + } + + return filepath.WalkDir(devices.metadataDir(), scan) +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) unregisterDevice(hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v)", hash) + info := &devInfo{ + Hash: hash, + } + + delete(devices.Devices, hash) + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("devmapper: Error removing metadata: %s", err) + return err + } + + return nil +} + +// Should be called with devices.Lock() held. 
+func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) {
+ logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash)
+ info := &devInfo{
+ Hash: hash,
+ DeviceID: id,
+ Size: size,
+ TransactionID: transactionID,
+ Initialized: false,
+ devices: devices,
+ }
+
+ devices.Devices[hash] = info
+
+ if err := devices.saveMetadata(info); err != nil {
+ // Try to remove unused device
+ delete(devices.Devices, hash)
+ return nil, err
+ }
+
+ return info, nil
+}
+
+func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error {
+ logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash)
+
+ if info.Deleted && !ignoreDeleted {
+ return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash)
+ }
+
+ // Make sure deferred removal on device is canceled, if one was
+ // scheduled.
+ if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil {
+ return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err)
+ }
+
+ if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 {
+ return nil
+ }
+
+ return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size)
+}
+
+// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error
+func xfsSupported() error {
+ // Make sure mkfs.xfs is available
+ if _, err := exec.LookPath("mkfs.xfs"); err != nil {
+ return err // error text is descriptive enough
+ }
+
+ // Check if kernel supports xfs filesystem or not.
+ exec.Command("modprobe", xfs).Run()
+
+ f, err := os.Open("/proc/filesystems")
+ if err != nil {
+ return errors.Wrapf(err, "error checking for xfs support")
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ if strings.HasSuffix(s.Text(), "\txfs") {
+ return nil
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return errors.Wrapf(err, "error checking for xfs support")
+ }
+
+ return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`)
+}
+
+func determineDefaultFS() string {
+ err := xfsSupported()
+ if err == nil {
+ return xfs
+ }
+
+ logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to %s filesystem", err, ext4)
+ return ext4
+}
+
+// mkfsOptions tries to figure out whether some additional mkfs options are required
+func mkfsOptions(fs string) []string {
+ if fs == xfs && !kernel.CheckKernelVersion(3, 16, 0) {
+ // For kernels earlier than 3.16 (and newer xfsutils),
+ // some xfs features need to be explicitly disabled.
+ return []string{"-m", "crc=0,finobt=0"}
+ }
+
+ return []string{}
+}
+
+func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
+ devname := info.DevName()
+
+ if devices.filesystem == "" {
+ devices.filesystem = determineDefaultFS()
+ }
+ if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil {
+ return err
+ }
+
+ args := mkfsOptions(devices.filesystem)
+ args = append(args, devices.mkfsArgs...)
+ args = append(args, devname) + + logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args) + defer func() { + if err != nil { + logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + } else { + logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + } + }() + + switch devices.filesystem { + case xfs: + err = exec.Command("mkfs.xfs", args...).Run() + case ext4: + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) + } + return +} + +func (devices *DeviceSet) migrateOldMetaData() error { + // Migrate old metadata file + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := metaData{Devices: make(map[string]*devInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + devices.saveMetadata(info) + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +// Cleanup deleted devices. It assumes that all the devices have been +// loaded in the hash table. +func (devices *DeviceSet) cleanupDeletedDevices() error { + devices.Lock() + + // If there are no deleted devices, there is nothing to do. + if devices.nrDeletedDevices == 0 { + devices.Unlock() + return nil + } + + var deletedDevices []*devInfo + + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + deletedDevices = append(deletedDevices, info) + } + + // Delete the deleted devices. DeleteDevice() first takes the info lock + // and then devices.Lock(). So drop it to avoid deadlock. + devices.Unlock() + + for _, info := range deletedDevices { + // This will again try deferred deletion. + if err := devices.DeleteDevice(info.Hash, false); err != nil { + logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + } + } + + return nil +} + +func (devices *DeviceSet) countDeletedDevices() { + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + devices.nrDeletedDevices++ + } +} + +func (devices *DeviceSet) startDeviceDeletionWorker() { + // Deferred deletion is not enabled. Don't do anything. + if !devices.deferredDelete { + return + } + + // Cleanup right away if there are any leaked devices. 
Note this + // could cause some slowdown for process startup, if there were + // Leaked devices + devices.cleanupDeletedDevices() + logrus.Debug("devmapper: Worker to cleanup deleted devices started") + for range devices.deletionWorkerTicker.C { + devices.cleanupDeletedDevices() + } +} + +func (devices *DeviceSet) initMetaData() error { + devices.Lock() + defer devices.Unlock() + + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionID, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionID = transactionID + + if err := devices.loadDeviceFilesOnStart(); err != nil { + return fmt.Errorf("devmapper: Failed to load device files:%v", err) + } + + devices.constructDeviceIDMap() + devices.countDeletedDevices() + + if err := devices.processPendingTransaction(); err != nil { + return err + } + + // Start a goroutine to cleanup Deleted Devices + go devices.startDeviceDeletionWorker() + return nil +} + +func (devices *DeviceSet) incNextDeviceID() { + // IDs are 24bit, so wrap around + devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID +} + +func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { + devices.incNextDeviceID() + for i := 0; i <= maxDeviceID; i++ { + if devices.isDeviceIDFree(devices.NextDeviceID) { + devices.markDeviceIDUsed(devices.NextDeviceID) + return devices.NextDeviceID, nil + } + devices.incNextDeviceID() + } + + return 0, fmt.Errorf("devmapper: Unable to find a free device ID") +} + +func (devices *DeviceSet) poolHasFreeSpace() error { + if devices.minFreeSpacePercent == 0 { + return nil + } + + _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err != nil { + return err + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeData < 1 { + minFreeData = 1 + } + dataFree := dataTotal - dataUsed + if dataFree < minFreeData { + return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) + } + + minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeMetadata < 1 { + minFreeMetadata = 1 + } + + metadataFree := metadataTotal - metadataUsed + if metadataFree < minFreeMetadata { + return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) + } + + return nil +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
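+ // (This can happen when the per-device metadata files, from which
+ // deviceIDMap is rebuilt at startup, fall out of sync with the pool,
+ // e.g. after an unclean shutdown.)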
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating device: %s", err) + devices.markDeviceIDFree(deviceID) + return nil, err + } + break + } + + logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { + var ( + devinfo *devicemapper.Info + err error + ) + + if err = devices.poolHasFreeSpace(); err != nil { + return err + } + + if devices.deferredRemove { + devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) + if err != nil { + return err + } + if devinfo != nil && devinfo.DeferredRemove != 0 { + err = devices.cancelDeferredRemoval(baseInfo) + if err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. + if errors.Cause(err) != devicemapper.ErrEnxio { + return err + } + devinfo = nil + } else { + defer devices.deactivateDevice(baseInfo) + } + } + } else { + devinfo, err = devicemapper.GetInfo(baseInfo.Name()) + if err != nil { + return err + } + } + + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { + return err + } + defer devicemapper.ResumeDevice(baseInfo.Name()) + } + + if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + + for { + if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating snap device: %s", err) + devices.markDeviceIDFree(deviceID) + return err + } + break + } + + if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + logrus.Debugf("devmapper: Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *devInfo { + info := &devInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if info.DeviceID > maxDeviceID { + logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + return uuid, nil +} + +func (devices *DeviceSet) getBaseDeviceSize() uint64 { + info, _ := devices.lookupDevice("") + if info == nil { + return 0 + } + return info.Size +} + +func (devices *DeviceSet) getBaseDeviceFS() string { + return devices.BaseDeviceFilesystem +} + +func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + if devices.BaseDeviceUUID != uuid { + return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) + } + + if devices.BaseDeviceFilesystem == "" { + fsType, err := ProbeFsType(baseInfo.DevName()) + if err != nil { + return err + } + if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { + return err + } + } + + // If user specified a filesystem using dm.fs option and current + // file system of base image is not same, warn user that dm.fs + // will be ignored. + if devices.BaseDeviceFilesystem != devices.filesystem { + logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. 
User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem)
+ devices.filesystem = devices.BaseDeviceFilesystem
+ }
+ return nil
+}
+
+func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error {
+ devices.BaseDeviceFilesystem = fs
+ return devices.saveDeviceSetMetaData()
+}
+
+func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error {
+ devices.Lock()
+ defer devices.Unlock()
+
+ if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil {
+ return err
+ }
+ defer devices.deactivateDevice(baseInfo)
+
+ uuid, err := getDeviceUUID(baseInfo.DevName())
+ if err != nil {
+ return err
+ }
+
+ devices.BaseDeviceUUID = uuid
+ return devices.saveDeviceSetMetaData()
+}
+
+func (devices *DeviceSet) createBaseImage() error {
+ logrus.Debug("devmapper: Initializing base device-mapper thin volume")
+
+ // Create initial device
+ info, err := devices.createRegisterDevice("")
+ if err != nil {
+ return err
+ }
+
+ logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume")
+
+ if err := devices.activateDeviceIfNeeded(info, false); err != nil {
+ return err
+ }
+
+ if err := devices.createFilesystem(info); err != nil {
+ return err
+ }
+
+ info.Initialized = true
+ if err := devices.saveMetadata(info); err != nil {
+ info.Initialized = false
+ return err
+ }
+
+ if err := devices.saveBaseDeviceUUID(info); err != nil {
+ return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
+ }
+
+ return nil
+}
+
+// Returns if thin pool device exists or not. If device exists, also makes
+// sure it is a thin pool device and not some other type of device.
+func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) {
+ logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice)
+
+ info, err := devicemapper.GetInfo(thinPoolDevice)
+ if err != nil {
+ return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err)
+ }
+
+ // Device does not exist.
+ if info.Exists == 0 {
+ return false, nil
+ }
+
+ _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice)
+ if err != nil {
+ return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err)
+ }
+
+ if deviceType != "thin-pool" {
+ return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice)
+ }
+
+ return true, nil
+}
+
+func (devices *DeviceSet) checkThinPool() error {
+ _, transactionID, dataUsed, _, _, _, err := devices.poolStatus()
+ if err != nil {
+ return err
+ }
+ if dataUsed != 0 {
+ return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks",
+ devices.thinPoolDevice)
+ }
+ if transactionID != 0 {
+ return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID",
+ devices.thinPoolDevice)
+ }
+ return nil
+}
+
+// Base image is initialized properly. Either save UUID for first time (for
+// upgrade case) or verify UUID.
+func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error {
+ // If BaseDeviceUUID is empty (upgrade case), save it and return success.
+ if devices.BaseDeviceUUID == "" { + if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + return nil + } + + if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) + } + + return nil +} + +func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { + + if !userBaseSize { + return nil + } + + if devices.baseFsSize < devices.getBaseDeviceSize() { + return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) + } + + if devices.baseFsSize == devices.getBaseDeviceSize() { + return nil + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + info.Size = devices.baseFsSize + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, info.Hash) + return err + } + + return devices.growFS(info) +} + +func (devices *DeviceSet) growFS(info *devInfo) error { + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("Error activating devmapper device: %s", err) + } + + defer devices.deactivateDevice(info) + + fsMountPoint := "/run/containers/storage/mnt" + if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { + if err := os.MkdirAll(fsMountPoint, 0700); err != nil { + return err + } + defer os.RemoveAll(fsMountPoint) + } + + options := "" + if devices.BaseDeviceFilesystem == xfs { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + options = joinMountOptions(options, devices.mountOptions) + + if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { + return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256))) + } + + defer func() { + if err := mount.Unmount(fsMountPoint); err != nil { + logrus.Warnf("devmapper.growFS cleanup error: %v", err) + } + }() + + switch devices.BaseDeviceFilesystem { + case ext4: + if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + case xfs: + if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + default: + return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) + } + return nil +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDeviceWithLock("") + + // base image already exists. If it is initialized properly, do UUID + // verification and return. Otherwise remove image and set it up + // fresh. + + if oldInfo != nil { + if oldInfo.Initialized && !oldInfo.Deleted { + if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { + return err + } + + if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { + return err + } + + return nil + } + + logrus.Debug("devmapper: Removing uninitialized base image") + // If previous base device is in deferred delete state, + // that needs to be cleaned up first. So don't try + // deferred deletion. + if err := devices.DeleteDevice("", true); err != nil { + return err + } + } + + // If we are setting up base image for the first time, make sure + // thin pool is empty. 
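+ // (checkThinPool above enforces this: it refuses a user-supplied pool
+ // with used data blocks or a non-zero transaction ID.)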
+ if devices.thinPoolDevice != "" && oldInfo == nil { + if err := devices.checkThinPool(); err != nil { + return err + } + } + + // Create new base image device + if err := devices.createBaseImage(); err != nil { + return err + } + + return nil +} + +func setCloseOnExec(name string) { + fileInfos, _ := ioutil.ReadDir("/proc/self/fd") + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + unix.CloseOnExec(fd) + } + } + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ResizePool increases the size of the pool. +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("devmapper: Can't shrink file") + } + + dataloopback := loopback.FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := loopback.FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := loopback.SetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("devmapper: Unable to reload pool: %s", err) + } + + // Resume the pool + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. 
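+ // (Given the transaction struct's JSON tags, the file holds something like
+ // {"open_transaction_id":42,"device_hash":"abc","device_id":7}; the values
+ // here are illustrative.)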
+ if os.IsNotExist(err) { + devices.OpenTransactionID = devices.TransactionID + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.transaction) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + return os.RemoveAll(devices.transactionMetaFile()) +} + +func (devices *DeviceSet) rollbackTransaction() error { + logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { + logrus.Errorf("devmapper: Unable to delete device: %s", err) + } + + dinfo := &devInfo{Hash: devices.DeviceIDHash} + if err := devices.removeMetadata(dinfo); err != nil { + logrus.Errorf("devmapper: Unable to remove metadata: %s", err) + } else { + devices.markDeviceIDFree(devices.DeviceID) + } + + if err := devices.removeTransactionMetaData(); err != nil { + logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction ID is same + // as open transaction ID, nothing to roll back. + if devices.TransactionID == devices.OpenTransactionID { + return nil + } + + // If open transaction ID is less than pool transaction ID, something + // is wrong. Bail out. + if devices.OpenTransactionID < devices.TransactionID { + logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) + return nil + } + + // Pool transaction ID is not same as open transaction. There is + // a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionID = devices.TransactionID + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. 
+ if os.IsNotExist(err) { + return nil + } + return err + } + + return json.Unmarshal(jsonData, devices) +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) +} + +func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { + devices.allocateTransactionID() + devices.DeviceIDHash = hash + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceID int) error { + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionID(); err != nil { + logrus.Debug("devmapper: Failed to close Transaction") + return err + } + return nil +} + +func determineDriverCapabilities(version string) error { + // Kernel driver version >= 4.27.0 support deferred removal + + logrus.Debugf("devicemapper: kernel dm driver version is %s", version) + + versionSplit := strings.Split(version, ".") + major, err := strconv.Atoi(versionSplit[0]) + if err != nil { + return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver major version %q as a number", versionSplit[0]) + } + + if major > 4 { + driverDeferredRemovalSupport = true + return nil + } + + if major < 4 { + return nil + } + + minor, err := strconv.Atoi(versionSplit[1]) + if err != nil { + return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver minor version %q as a number", versionSplit[1]) + } + + /* + * If major is 4 and minor is 27, then there is no need to + * check for patch level as it can not be less than 0. + */ + if minor >= 27 { + driverDeferredRemovalSupport = true + return nil + } + + return nil +} + +// Determine the major and minor number of loopback device +func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { + var stat unix.Stat_t + err := unix.Stat(file.Name(), &stat) + if err != nil { + return 0, 0, err + } + + dev := stat.Rdev + majorNum := major(uint64(dev)) + minorNum := minor(uint64(dev)) + + logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) + return majorNum, minorNum, nil +} + +// Given a file which is backing file of a loop back device, find the +// loopback device name and its major/minor number. 
+func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { + file, err := os.Open(filename) + if err != nil { + logrus.Debugf("devmapper: Failed to open file %s", filename) + return "", 0, 0, err + } + + defer file.Close() + loopbackDevice := loopback.FindLoopDeviceFor(file) + if loopbackDevice == nil { + return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) + } + defer loopbackDevice.Close() + + Major, Minor, err := getDeviceMajorMinor(loopbackDevice) + if err != nil { + return "", 0, 0, err + } + return loopbackDevice.Name(), Major, Minor, nil +} + +// Get the major/minor numbers of thin pool data and metadata devices +func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { + var params, poolDataMajMin, poolMetadataMajMin string + + _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) + if err != nil { + return 0, 0, 0, 0, err + } + + if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { + return 0, 0, 0, 0, err + } + + logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + + poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") + poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") + poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil +} + +func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { + poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() + if err != nil { + return err + } + + dirname := devices.loopbackDir() + + // data device has not been passed in. So there should be a data file + // which is being mounted as loop device. + if devices.dataDevice == "" { + datafilename := path.Join(dirname, "data") + dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) + if err != nil { + return err + } + + // Compare the two + if poolDataMajor == dataMajor && poolDataMinor == dataMinor { + devices.dataDevice = dataLoopDevice + devices.dataLoopFile = datafilename + } + + } + + // metadata device has not been passed in. So there should be a + // metadata file which is being mounted as loop device. + if devices.metadataDevice == "" { + metadatafilename := path.Join(dirname, "metadata") + metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) + if err != nil { + return err + } + if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { + devices.metadataDevice = metadataLoopDevice + devices.metadataLoopFile = metadatafilename + } + } + + return nil +} + +func (devices *DeviceSet) enableDeferredRemovalDeletion() error { + + // If user asked for deferred removal then check both libdm library + // and kernel driver support deferred removal otherwise error out. 
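+ // ("Removal" only deactivates the device-mapper device; "deletion" also
+ // drops the thin device from the pool, so deletion requires removal, as
+ // enforced below.)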
+	if enableDeferredRemoval {
+		if !driverDeferredRemovalSupport {
+			return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it")
+		}
+		if !devicemapper.LibraryDeferredRemovalSupport {
+			return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it")
+		}
+		logrus.Debug("devmapper: Deferred removal support enabled.")
+		devices.deferredRemove = true
+	}
+
+	if enableDeferredDeletion {
+		if !devices.deferredRemove {
+			return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter")
+		}
+		logrus.Debug("devmapper: Deferred deletion support enabled.")
+		devices.deferredDelete = true
+	}
+	return nil
+}
+
+func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
+	if err := devices.enableDeferredRemovalDeletion(); err != nil {
+		return err
+	}
+
+	// https://github.com/docker/docker/issues/4036
+	if supported := devicemapper.UdevSetSyncSupport(true); !supported {
+		logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options")
+
+		if !devices.overrideUdevSyncCheck {
+			return graphdriver.ErrNotSupported
+		}
+	}
+
+	//create the root dir of the devmapper driver ownership to match this
+	//daemon's remapped root uid/gid so containers can start properly
+	uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
+	if err != nil {
+		return err
+	}
+	if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil {
+		return err
+	}
+	if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil {
+		return err
+	}
+
+	prevSetupConfig, err := readLVMConfig(devices.root)
+	if err != nil {
+		return err
+	}
+
+	if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) {
+		if devices.thinPoolDevice != "" {
+			return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified")
+		}
+
+		if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) {
+			if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) {
+				return errors.New("changing direct-lvm config is not supported")
+			}
+			logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode")
+			if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil {
+				return err
+			}
+			if err := setupDirectLVM(devices.lvmSetupConfig); err != nil {
+				return err
+			}
+			if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil {
+				return err
+			}
+		}
+		devices.thinPoolDevice = "storage-thinpool"
+		logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice)
+	}
+
+	// Set the device prefix from the device id and inode of the storage root dir
+	var st unix.Stat_t
+	if err := unix.Stat(devices.root, &st); err != nil {
+		return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err)
+	}
+	// "reg-" stands for "regular file".
+	// In the future we might use "dev-" for "device file", etc.
+	// container-maj,min[-inode] stands for:
+	//	- Managed by container storage
+	//	- The target of this device is at major <maj> and minor <min>
+	//	- If <inode> is defined, use that file inside the device as a loopback image.
Otherwise use the device itself. + devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(uint64(st.Dev)), minor(uint64(st.Dev)), st.Ino) + logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) + + // Check for the existence of the thin-pool device + poolExists, err := devices.thinPoolExists(devices.getPoolName()) + if err != nil { + return err + } + + // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files + // that are not Close-on-exec, + // so we add this badhack to make sure it closes itself + setCloseOnExec("/dev/mapper/control") + + // Make sure the sparse images exist in /devicemapper/data and + // /devicemapper/metadata + + createdLoopback := false + + // If the pool doesn't exist, create it + if !poolExists && devices.thinPoolDevice == "" { + logrus.Debug("devmapper: Pool doesn't exist. Creating it.") + + var ( + dataFile *os.File + metadataFile *os.File + ) + + fsMagic, err := graphdriver.GetFSMagic(devices.loopbackDir()) + if err != nil { + return err + } + switch fsMagic { + case graphdriver.FsMagicAufs: + return errors.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems") + } + + if devices.dataDevice == "" { + // Make sure the sparse images exist in /devicemapper/data + + hasData := devices.hasImage("data") + + if !doInit && !hasData { + return errors.New("loopback data file not found") + } + + if !hasData { + createdLoopback = true + } + + data, err := devices.ensureImage("data", devices.dataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) + return err + } + + dataFile, err = loopback.AttachLoopDevice(data) + if err != nil { + return err + } + devices.dataLoopFile = data + devices.dataDevice = dataFile.Name() + } else { + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer dataFile.Close() + + if devices.metadataDevice == "" { + // Make sure the sparse images exist in /devicemapper/metadata + + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasMetadata { + return errors.New("loopback metadata file not found") + } + + if !hasMetadata { + createdLoopback = true + } + + metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) + return err + } + + metadataFile, err = loopback.AttachLoopDevice(metadata) + if err != nil { + return err + } + devices.metadataLoopFile = metadata + devices.metadataDevice = metadataFile.Name() + } else { + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer metadataFile.Close() + + if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { + return err + } + defer func() { + if retErr != nil { + err = devices.deactivatePool() + if err != nil { + logrus.Warnf("devmapper: Failed to deactivatePool: %v", err) + } + } + }() + } + + // Pool already exists and caller did not pass us a pool. That means + // we probably created pool earlier and could not remove it as some + // containers were still using it. Detect some of the properties of + // pool, like is it using loop devices. 
+ if poolExists && devices.thinPoolDevice == "" { + if err := devices.loadThinPoolLoopBackInfo(); err != nil { + logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err := devices.initMetaData(); err != nil { + return err + } + } + + if devices.thinPoolDevice == "" { + if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { + logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev`.") + } + } + + // Right now this loads only NextDeviceID. If there is more metadata + // down the line, we might have to move it earlier. + if err := devices.loadDeviceSetMetaData(); err != nil { + return err + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +// AddDevice adds a device and registers in the hash. +func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { + logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) + + // If a deleted device exists, return error. + baseInfo, err := devices.lookupDeviceWithLock(baseHash) + if err != nil { + return err + } + + if baseInfo.Deleted { + return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // Also include deleted devices in case hash of new device is + // same as one of the deleted devices. + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) + } + + size, err := devices.parseStorageOpt(storageOpt) + if err != nil { + return err + } + + if size == 0 { + size = baseInfo.Size + } + + if size < baseInfo.Size { + return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) + } + + if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { + return err + } + + // Grow the container rootfs. + if size > baseInfo.Size { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + if err := devices.growFS(info); err != nil { + return err + } + } + + return nil +} + +func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { + + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return 0, err + } + return uint64(size), nil + default: + return 0, fmt.Errorf("Unknown option %s", key) + } + } + + return 0, nil +} + +func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { + // If device is already in deleted state, there is nothing to be done. + if info.Deleted { + return nil + } + + logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) + + info.Deleted = true + + // save device metadata to reflect deleted state. 
+ if err := devices.saveMetadata(info); err != nil { + info.Deleted = false + return err + } + + devices.nrDeletedDevices++ + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) deleteDeviceNoLock(info *devInfo, syncDelete bool) error { + err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) + if err != nil { + // If syncDelete is true, we want to return error. If deferred + // deletion is not enabled, we return an error. If error is + // something other then EBUSY, return an error. + if syncDelete || !devices.deferredDelete || errors.Cause(err) != devicemapper.ErrBusy { + logrus.Debugf("devmapper: Error deleting device: %s", err) + return err + } + } + + if err == nil { + if err := devices.unregisterDevice(info.Hash); err != nil { + return err + } + // If device was already in deferred delete state that means + // deletion was being tried again later. Reduce the deleted + // device count. + if info.Deleted { + devices.nrDeletedDevices-- + } + devices.markDeviceIDFree(info.DeviceID) + } else { + if err := devices.markForDeferredDeletion(info); err != nil { + return err + } + } + + return nil +} + +// Issue discard only if device open count is zero. +func (devices *DeviceSet) issueDiscard(info *devInfo) error { + logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash) + defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash) + // This is a workaround for the kernel not discarding block so + // on the thin pool when we remove a thinp device, so we do it + // manually. + // Even if device is deferred deleted, activate it and issue + // discards. + if err := devices.activateDeviceIfNeeded(info, true); err != nil { + return err + } + + devinfo, err := devicemapper.GetInfo(info.Name()) + if err != nil { + return err + } + + if devinfo.OpenCount != 0 { + logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount) + return nil + } + + if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { + logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err) + } + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { + if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { + logrus.WithField("storage-driver", "devicemapper").Debugf("Error opening transaction hash = %s deviceId = %d", info.Hash, info.DeviceID) + return err + } + + defer devices.closeTransaction() + + if devices.doBlkDiscard { + devices.issueDiscard(info) + } + + // Try to deactivate device in case it is active. + // If deferred removal is enabled and deferred deletion is disabled + // then make sure device is removed synchronously. There have been + // some cases of device being busy for short duration and we would + // rather busy wait for device removal to take care of these cases. + deferredRemove := devices.deferredRemove + if !devices.deferredDelete { + deferredRemove = false + } + + if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil { + logrus.Debugf("devmapper: Error deactivating device: %s", err) + return err + } + + if err := devices.deleteDeviceNoLock(info, syncDelete); err != nil { + return err + } + + return nil +} + +// DeleteDevice will return success if device has been marked for deferred +// removal. 
If one wants to override that and want DeleteDevice() to fail if +// device was busy and could not be deleted, set syncDelete=true. +func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { + logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete) + defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete) + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + return devices.deleteDevice(info, syncDelete) +} + +func (devices *DeviceSet) deactivatePool() error { + logrus.Debug("devmapper: deactivatePool() START") + defer logrus.Debug("devmapper: deactivatePool() END") + devname := devices.getPoolDevName() + + devinfo, err := devicemapper.GetInfo(devname) + if err != nil { + return err + } + + if devinfo.Exists == 0 { + return nil + } + if err := devicemapper.RemoveDevice(devname); err != nil { + return err + } + + if d, err := devicemapper.GetDeps(devname); err == nil { + logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count) + } + + return nil +} + +func (devices *DeviceSet) deactivateDevice(info *devInfo) error { + return devices.deactivateDeviceMode(info, devices.deferredRemove) +} + +func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error { + var err error + logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash) + defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) + + devinfo, err := devicemapper.GetInfo(info.Name()) + if err != nil { + return err + } + + if devinfo.Exists == 0 { + return nil + } + + if deferredRemove { + err = devicemapper.RemoveDeviceDeferred(info.Name()) + } else { + err = devices.removeDevice(info.Name()) + } + + // This function's semantics is such that it does not return an + // error if device does not exist. So if device went away by + // the time we actually tried to remove it, do not return error. + if errors.Cause(err) != devicemapper.ErrEnxio { + return err + } + return nil +} + +// Issues the underlying dm remove operation. +func (devices *DeviceSet) removeDevice(devname string) error { + var err error + + logrus.Debugf("devmapper: removeDevice START(%s)", devname) + defer logrus.Debugf("devmapper: removeDevice END(%s)", devname) + + for i := 0; i < 200; i++ { + err = devicemapper.RemoveDevice(devname) + if err == nil { + break + } + if errors.Cause(err) != devicemapper.ErrBusy { + return err + } + + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. + devices.Unlock() + time.Sleep(100 * time.Millisecond) + devices.Lock() + } + + return err +} + +func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error { + if !devices.deferredRemove { + return nil + } + + logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name()) + defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name()) + + devinfo, err := devicemapper.GetInfoWithDeferred(info.Name()) + if err != nil { + return err + } + + if devinfo != nil && devinfo.DeferredRemove == 0 { + return nil + } + + // Cancel deferred remove + if err := devices.cancelDeferredRemoval(info); err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. 
+ if errors.Cause(err) != devicemapper.ErrEnxio { + return err + } + } + return nil +} + +func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { + logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) + defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) + + var err error + + // Cancel deferred remove + for i := 0; i < 100; i++ { + err = devicemapper.CancelDeferredRemove(info.Name()) + if err != nil { + if errors.Cause(err) != devicemapper.ErrBusy { + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. + devices.Unlock() + time.Sleep(100 * time.Millisecond) + devices.Lock() + continue + } + } + break + } + return err +} + +func (devices *DeviceSet) unmountAndDeactivateAll(dir string) { + files, err := ioutil.ReadDir(dir) + if err != nil { + logrus.Warnf("devmapper: unmountAndDeactivate: %s", err) + return + } + + for _, d := range files { + if !d.IsDir() { + continue + } + + name := d.Name() + fullname := path.Join(dir, name) + + // We use MNT_DETACH here in case it is still busy in some running + // container. This means it'll go away from the global scope directly, + // and the device will be released when that container dies. + if err := mount.Unmount(fullname); err != nil { + logrus.Warnf("devmapper.Shutdown error: %s", err) + } + + if devInfo, err := devices.lookupDevice(name); err != nil { + logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", name, err) + } else { + if err := devices.deactivateDevice(devInfo); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate %s, error: %s", devInfo.Hash, err) + } + } + } +} + +// Shutdown shuts down the device by unmounting the root. +func (devices *DeviceSet) Shutdown(home string) error { + logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix) + logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root) + defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix) + + // Stop deletion worker. This should start delivering new events to + // ticker channel. That means no new instance of cleanupDeletedDevice() + // will run after this call. If one instance is already running at + // the time of the call, it must be holding devices.Lock() and + // we will block on this lock till cleanup function exits. + devices.deletionWorkerTicker.Stop() + + devices.Lock() + // Save DeviceSet Metadata first. Docker kills all threads if they + // don't finish in certain time. It is possible that Shutdown() + // routine does not finish in time as we loop trying to deactivate + // some devices while these are busy. In that case shutdown() routine + // will be killed and we will not get a chance to save deviceset + // metadata. Hence save this early before trying to deactivate devices. + devices.saveDeviceSetMetaData() + devices.unmountAndDeactivateAll(path.Join(home, "mnt")) + devices.Unlock() + + info, _ := devices.lookupDeviceWithLock("") + if info != nil { + info.lock.Lock() + devices.Lock() + if err := devices.deactivateDevice(info); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err) + } + devices.Unlock() + info.lock.Unlock() + } + + devices.Lock() + if devices.thinPoolDevice == "" { + if err := devices.deactivatePool(); err != nil { + logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err) + } + } + devices.Unlock() + + return nil +} + +// Recent XFS changes allow changing behavior of filesystem in case of errors. 
+// When thin pool gets full and XFS gets ENOSPC error, currently it tries
+// IO infinitely and sometimes it can block the container process
+// and process can't be killed. With 0 value, XFS will not retry upon
+// error and instead will shutdown filesystem.
+
+func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
+	dmDevicePath, err := os.Readlink(info.DevName())
+	if err != nil {
+		return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err)
+	}
+
+	dmDeviceName := path.Base(dmDevicePath)
+	filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
+	maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
+	if err != nil {
+		return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err)
+	}
+	defer maxRetriesFile.Close()
+
+	// Set max retries to 0
+	_, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
+	if err != nil {
+		return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err)
+	}
+	return nil
+}
+
+// MountDevice mounts the device if not already mounted.
+func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.MountOpts) error {
+	info, err := devices.lookupDeviceWithLock(hash)
+	if err != nil {
+		return err
+	}
+
+	if info.Deleted {
+		return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash)
+	}
+
+	info.lock.Lock()
+	defer info.lock.Unlock()
+
+	devices.Lock()
+	defer devices.Unlock()
+
+	if err := devices.activateDeviceIfNeeded(info, false); err != nil {
+		return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
+	}
+
+	fstype, err := ProbeFsType(info.DevName())
+	if err != nil {
+		return err
+	}
+
+	options := ""
+
+	if fstype == xfs {
+		// XFS needs nouuid or it can't mount filesystems with the same fs
+		options = joinMountOptions(options, "nouuid")
+	}
+
+	mountOptions := devices.mountOptions
+	if len(moptions.Options) > 0 {
+		addNouuid := strings.Contains("nouuid", mountOptions)
+		mountOptions = strings.Join(moptions.Options, ",")
+		if addNouuid {
+			mountOptions = fmt.Sprintf("nouuid,%s", mountOptions)
+		}
+	}
+
+	options = joinMountOptions(options, mountOptions)
+	options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel))
+
+	if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
+		return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256)))
+	}
+
+	if fstype == xfs && devices.xfsNospaceRetries != "" {
+		if err := devices.xfsSetNospaceRetries(info); err != nil {
+			if err := mount.Unmount(path); err != nil {
+				logrus.Warnf("devmapper.MountDevice cleanup error: %v", err)
+			}
+			devices.deactivateDevice(info)
+			return err
+		}
+	}
+
+	return nil
+}
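The knob that `xfsSetNospaceRetries` pokes lives at `/sys/fs/xfs/<dm-name>/error/metadata/ENOSPC/max_retries`, where `<dm-name>` is found by resolving the `/dev/mapper` symlink. Here is a hedged standalone sketch of the same two steps; the device path below is invented for illustration, and actually running this needs root plus an XFS filesystem on a devicemapper device.

```go
package main

import (
	"fmt"
	"os"
	"path"
)

// setXFSNospaceRetries mirrors xfsSetNospaceRetries above as a standalone
// helper: resolve the dm-N name behind a /dev/mapper symlink, then write the
// retry count into the XFS ENOSPC error knob. "0" tells XFS to stop retrying
// and shut the filesystem down instead of blocking the container forever.
func setXFSNospaceRetries(devMapperPath, retries string) error {
	target, err := os.Readlink(devMapperPath) // e.g. "../dm-3"
	if err != nil {
		return fmt.Errorf("readlink %s: %w", devMapperPath, err)
	}
	knob := "/sys/fs/xfs/" + path.Base(target) + "/error/metadata/ENOSPC/max_retries"
	return os.WriteFile(knob, []byte(retries), 0o200)
}

func main() {
	// Hypothetical device name in the "container-maj:min-inode" prefix scheme.
	if err := setXFSNospaceRetries("/dev/mapper/container-8:1-1234-deadbeef", "0"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```

+
+// UnmountDevice unmounts the device and removes it from hash.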
+func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { + logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) + defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) + + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + logrus.Debugf("devmapper: Unmount(%s)", mountPath) + if err := mount.Unmount(mountPath); err != nil { + if ok, _ := Mounted(mountPath); ok { + return err + } + } + logrus.Debug("devmapper: Unmount done") + + // Remove the mountpoint here. Removing the mountpoint (in newer kernels) + // will cause all other instances of this mount in other mount namespaces + // to be killed (this is an anti-DoS measure that is necessary for things + // like devicemapper). This is necessary to avoid cases where a libdm mount + // that is present in another namespace will cause subsequent RemoveDevice + // operations to fail. We ignore any errors here because this may fail on + // older kernels which don't have + // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. + if err := os.Remove(mountPath); err != nil { + logrus.Debugf("devmapper: error doing a remove on unmounted device %s: %v", mountPath, err) + } + + return devices.deactivateDevice(info) +} + +// HasDevice returns true if the device metadata exists. +func (devices *DeviceSet) HasDevice(hash string) bool { + info, _ := devices.lookupDeviceWithLock(hash) + return info != nil +} + +// List returns a list of device ids. +func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +// GetDeviceStatus provides size, mapped sectors +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceID: info.DeviceID, + Size: info.Size, + TransactionID: info.TransactionID, + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// 
regardless of loopback or block device +func (devices *DeviceSet) DataDevicePath() string { + return devices.dataDevice +} + +// MetadataDevicePath returns the path to the metadata storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) MetadataDevicePath() string { + return devices.metadataDevice +} + +func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { + buf := new(unix.Statfs_t) + if err := unix.Statfs(loopFile, buf); err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) + return 0, err + } + return buf.Bfree * uint64(buf.Bsize), nil +} + +func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { + if loopFile != "" { + fi, err := os.Stat(loopFile) + if err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) + return false, err + } + return fi.Mode().IsRegular(), nil + } + return false, nil +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + status.DataFile = devices.DataDevicePath() + status.DataLoopback = devices.dataLoopFile + status.MetadataFile = devices.MetadataDevicePath() + status.MetadataLoopback = devices.metadataLoopFile + status.UdevSyncSupported = devicemapper.UdevSyncSupported() + status.DeferredRemoveEnabled = devices.deferredRemove + status.DeferredDeleteEnabled = devices.deferredDelete + status.DeferredDeletedDeviceCount = devices.nrDeletedDevices + status.BaseDeviceSize = devices.getBaseDeviceSize() + status.BaseDeviceFS = devices.getBaseDeviceFS() + + totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err == nil { + // Convert from blocks to bytes + blockSizeInSectors := totalSizeInSectors / dataTotal + + status.Data.Used = dataUsed * blockSizeInSectors * 512 + status.Data.Total = dataTotal * blockSizeInSectors * 512 + status.Data.Available = status.Data.Total - status.Data.Used + + // metadata blocks are always 4k + status.Metadata.Used = metadataUsed * 4096 + status.Metadata.Total = metadataTotal * 4096 + status.Metadata.Available = status.Metadata.Total - status.Metadata.Used + + status.SectorSize = blockSizeInSectors * 512 + + if check, _ := devices.isRealFile(devices.dataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) + if err == nil && actualSpace < status.Data.Available { + status.Data.Available = actualSpace + } + } + + if check, _ := devices.isRealFile(devices.metadataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) + if err == nil && actualSpace < status.Metadata.Available { + status.Metadata.Available = actualSpace + } + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + status.MinFreeSpace = minFreeData * blockSizeInSectors * 512 + } + + return status +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()} + return metadata, nil +} + +// NewDeviceSet creates the device set based on the options provided. 
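To make the sector arithmetic in `Status()` above concrete: the thin-pool block size is recovered by dividing the pool size in 512-byte sectors by the number of data blocks, per-block byte counts follow from that, and metadata blocks are fixed at 4 KiB. A worked example with invented pool numbers (none of these figures come from a real pool), using the same go-units formatting the driver does; `NewDeviceSet`, introduced by the comment above, continues below.

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// Invented values in the shape poolStatus returns: a 100 GiB pool
	// (in 512-byte sectors) carved into 64 KiB thin-provisioning blocks.
	var (
		totalSizeInSectors uint64 = 209715200 // 100 GiB / 512
		dataUsed           uint64 = 102400    // blocks in use
		dataTotal          uint64 = 1638400   // blocks overall
		metadataUsed       uint64 = 705
		metadataTotal      uint64 = 524288
	)

	blockSizeInSectors := totalSizeInSectors / dataTotal // = 128 sectors = 64 KiB

	fmt.Println("data used: ", units.HumanSize(float64(dataUsed*blockSizeInSectors*512)))
	fmt.Println("data total:", units.HumanSize(float64(dataTotal*blockSizeInSectors*512)))

	// Metadata blocks are always 4 KiB, no derivation needed.
	fmt.Println("meta used: ", units.HumanSize(float64(metadataUsed*4096)))
	fmt.Println("meta total:", units.HumanSize(float64(metadataTotal*4096)))
}
```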
+func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { + devicemapper.SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + metaData: metaData{Devices: make(map[string]*devInfo)}, + dataLoopbackSize: defaultDataLoopbackSize, + metaDataLoopbackSize: defaultMetaDataLoopbackSize, + baseFsSize: defaultBaseFsSize, + overrideUdevSyncCheck: defaultUdevSyncOverride, + doBlkDiscard: true, + thinpBlockSize: defaultThinpBlockSize, + deviceIDMap: make([]byte, deviceIDMapSz), + deletionWorkerTicker: time.NewTicker(time.Second * 30), + uidMaps: uidMaps, + gidMaps: gidMaps, + minFreeSpacePercent: defaultMinFreeSpacePercent, + } + + version, err := devicemapper.GetDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return nil, graphdriver.ErrNotSupported + } + + if err := determineDriverCapabilities(version); err != nil { + return nil, graphdriver.ErrNotSupported + } + + if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport { + // enable deferred stuff by default + enableDeferredDeletion = true + enableDeferredRemoval = true + } + + foundBlkDiscard := false + var lvmSetupConfig directLVMConfig + testMode := false + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + userBaseSize = true + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != ext4 && val != xfs { + return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt", "devicemapper.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.metadata_size": + devices.metaDataSize = val + case "dm.datadev": + devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + case "dm.override_udev_sync_check": + devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_removal": + enableDeferredRemoval, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_deletion": + enableDeferredDeletion, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.metaDataSize": + lvmSetupConfig.MetaDataSize = val + case "dm.min_free_space": + if !strings.HasSuffix(val, "%") { + return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") + } + + valstring := strings.TrimSuffix(val, "%") + minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) + if err != nil { + return nil, err + } + + if 
minFreeSpacePercent >= 100 { + return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) + } + + devices.minFreeSpacePercent = uint32(minFreeSpacePercent) + case "dm.xfs_nospace_max_retries": + _, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + devices.xfsNospaceRetries = val + case "dm.directlvm_device": + lvmSetupConfig.Device = val + case "dm.directlvm_device_force": + lvmSetupConfigForce, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.thinp_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpPercent = per + case "dm.thinp_metapercent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpMetaPercent = per + case "dm.thinp_autoextend_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendPercent = per + case "dm.thinp_autoextend_threshold": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendThreshold = per + case "dm.libdm_log_level": + level, err := strconv.ParseInt(val, 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val) + } + if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug { + return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) + } + // Register a new logging callback with the specified level. 
+ devicemapper.LogInit(devicemapper.DefaultLogger{ + Level: int(level), + }) + case "test": + testMode, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("devmapper: Unknown option %s", key) + } + } + + if !testMode { + if err := validateLVMConfig(lvmSetupConfig); err != nil { + return nil, err + } + } + + devices.lvmSetupConfig = lvmSetupConfig + + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive + if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go new file mode 100644 index 00000000000..418b9e61087 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go @@ -0,0 +1,108 @@ +// +build linux,cgo + +package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognized ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. 
+// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go new file mode 100644 index 00000000000..d2f165e26de --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -0,0 +1,273 @@ +// +build linux,cgo + +package devmapper + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/devicemapper" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/locker" + "github.com/containers/storage/pkg/mount" + units "github.com/docker/go-units" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const defaultPerms = os.FileMode(0555) + +func init() { + graphdriver.Register("devicemapper", Init) +} + +// Driver contains the device set mounted and the home directory +type Driver struct { + *DeviceSet + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + locker *locker.Locker +} + +// Init creates a driver with the given home and the set of options. +func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options.DriverOptions, options.UIDMaps, options.GIDMaps) + if err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + uidMaps: options.UIDMaps, + gidMaps: options.GIDMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + locker: locker.New(), + } + + return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +// Status returns the status about the driver in a printable format. +// Information returned contains Pool Name, Data File, Metadata file, disk usage by +// the data and metadata, etc. 
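The `init()` hook near the top of driver.go is the entire integration point with the graphdriver registry: the package registers its `Init` function under the name "devicemapper", and the storage layer later instantiates it by name, handing each driver a home directory under the storage root. Below is a simplified sketch of that register/lookup pattern using stand-in types, not the vendored API; the `Status` method documented just above follows it.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// initFunc and registry are illustrative stand-ins for graphdriver.InitFunc
// and the package-level drivers map.
type initFunc func(home string) (string, error)

var registry = map[string]initFunc{}

func register(name string, fn initFunc) {
	if _, dup := registry[name]; dup {
		panic("driver registered twice: " + name)
	}
	registry[name] = fn
}

func getDriver(name, root string) (string, error) {
	fn, ok := registry[name]
	if !ok {
		return "", fmt.Errorf("driver not supported: %s", name)
	}
	// Each driver gets a home directory under the storage root, as in
	// filepath.Join(config.Root, name) in the vendored GetDriver.
	return fn(filepath.Join(root, name))
}

func init() {
	// Self-registration from init(), exactly the hook driver.go uses.
	register("devicemapper", func(home string) (string, error) {
		return "devicemapper driver at " + home, nil
	})
}

func main() {
	d, err := getDriver("devicemapper", "/var/lib/containers/storage")
	fmt.Println(d, err)
}
```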
+func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))}, + {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))}, + {"Backing Filesystem", s.BaseDeviceFS}, + {"Data file", s.DataFile}, + {"Metadata file", s.MetadataFile}, + {"Data Space Used", units.HumanSize(float64(s.Data.Used))}, + {"Data Space Total", units.HumanSize(float64(s.Data.Total))}, + {"Data Space Available", units.HumanSize(float64(s.Data.Available))}, + {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))}, + {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))}, + {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))}, + {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))}, + {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, + {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, + {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, + {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, + } + if len(s.DataLoopback) > 0 { + status = append(status, [2]string{"Data loop file", s.DataLoopback}) + } + if len(s.MetadataLoopback) > 0 { + status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) + } + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +// Metadata returns a map of information about the device. +func (d *Driver) Metadata(id string) (map[string]string, error) { + m, err := d.DeviceSet.exportDeviceMetadata(id) + + if err != nil { + return nil, err + } + + metadata := make(map[string]string) + metadata["DeviceId"] = strconv.Itoa(m.deviceID) + metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) + metadata["DeviceName"] = m.deviceName + return metadata, nil +} + +// Cleanup unmounts a device. +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown(d.home) + + umountErr := mount.Unmount(d.home) + // in case we have two errors, prefer the one from Shutdown() + if err != nil { + return err + } + + return umountErr +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + return d.Create(id, template, opts) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create adds a device with a given id and the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { + return err + } + + return nil +} + +// Remove removes a device with a given id, unmounts the filesystem, and removes the mount point. 
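All byte quantities in the table above pass through `units.HumanSize`, which renders base-1000 (SI) units, so raw counts from the pool surface as strings like "2.147GB". A quick check with arbitrary sample values:

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// Arbitrary byte counts, formatted the way the status table formats them.
	for _, b := range []float64{65536, 2147483648, 107374182400} {
		fmt.Println(units.HumanSize(b)) // 65.54kB, 2.147GB, 107.4GB
	}
}
```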
+func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id, false); err != nil { + return fmt.Errorf("failed to remove device %s: %v", id, err) + } + + // Most probably the mount point is already removed on Put() + // (see DeviceSet.UnmountDevice()), but just in case it was not + // let's try to remove it here as well, ignoring errors as + // an older kernel can return EBUSY if e.g. the mount was leaked + // to other mount namespaces. A failure to remove the container's + // mount point is not important and should not be treated + // as a failure to remove the container. + mp := path.Join(d.home, "mnt", id) + err := unix.Rmdir(mp) + if err != nil && !os.IsNotExist(err) { + logrus.WithField("storage-driver", "devicemapper").Warnf("unable to remove mount point %q: %s", mp, err) + } + + return nil +} + +// Get mounts a device with given id into the root filesystem +func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + mp := path.Join(d.home, "mnt", id) + rootFs := path.Join(mp, "rootfs") + if count := d.ctr.Increment(mp); count > 1 { + return rootFs, nil + } + + uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mp) + return "", err + } + + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil { + d.ctr.Decrement(mp) + return "", err + } + if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, options); err != nil { + d.ctr.Decrement(mp) + return "", err + } + + if err := idtools.MkdirAllAs(rootFs, defaultPerms, uid, gid); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + } + + return rootFs, nil +} + +// Put unmounts a device and removes it. +func (d *Driver) Put(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) + mp := path.Join(d.home, "mnt", id) + if count := d.ctr.Decrement(mp); count > 0 { + return nil + } + + err := d.DeviceSet.UnmountDevice(id, mp) + if err != nil { + logrus.Errorf("devmapper: Error unmounting device %s: %v", id, err) + } + + return err +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For devmapper, it queries the mnt path for this ID. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) + return directory.Usage(path.Join(d.home, "mnt", id)) +} + +// Exists checks to see if the device exists. 
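`Get` and `Put` above bracket every mount in a per-path reference count, so nested `Get`s reuse an existing mount and only the final `Put` unmounts. A minimal sketch of that refcounting pattern follows — an illustrative stand-in, not the vendored `graphdriver.RefCounter`:

```go
package main

import (
	"fmt"
	"sync"
)

// refCounter tracks how many holders a mountpoint currently has.
type refCounter struct {
	mu     sync.Mutex
	counts map[string]int
}

func (c *refCounter) Increment(path string) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counts[path]++
	return c.counts[path]
}

func (c *refCounter) Decrement(path string) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.counts[path] > 0 {
		c.counts[path]--
	}
	return c.counts[path]
}

func main() {
	ctr := &refCounter{counts: make(map[string]int)}
	mp := "/var/lib/containers/storage/devicemapper/mnt/abc" // hypothetical
	// First Get mounts; a nested Get only bumps the count.
	fmt.Println(ctr.Increment(mp)) // 1 -> perform the mount
	fmt.Println(ctr.Increment(mp)) // 2 -> already mounted, reuse
	// Puts unwind; only the Put that drops the count to 0 unmounts.
	fmt.Println(ctr.Decrement(mp)) // 1 -> keep mounted
	fmt.Println(ctr.Decrement(mp)) // 0 -> unmount now
}
```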
+func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go new file mode 100644 index 00000000000..54db6ab4aea --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go @@ -0,0 +1,5 @@ +package devmapper + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go new file mode 100644 index 00000000000..41e73faf525 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/devmapper/mount.go @@ -0,0 +1,88 @@ +// +build linux,cgo + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + + "golang.org/x/sys/unix" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. + +// Mounted returns true if a mount point exists. +func Mounted(mountpoint string) (bool, error) { + var mntpointSt unix.Stat_t + if err := unix.Stat(mountpoint, &mntpointSt); err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + var parentSt unix.Stat_t + if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil { + return false, err + } + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +// ProbeFsType returns the filesystem name for the given device id. +func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + defer file.Close() + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + + if uint64(l) != maxLen { + return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go new file mode 100644 index 00000000000..770b431bdd3 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -0,0 +1,408 @@ +package graphdriver + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + digest "github.com/opencontainers/go-digest" +) + +// FsMagic unsigned id of the filesystem in use. 
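`ProbeFsType` above identifies a filesystem purely by magic bytes at fixed offsets; ext4's 0x53 0xEF pair is spelled in octal escapes as "\123\357". Here is the same table logic run against an in-memory buffer instead of a device node, so it can run anywhere (the buffer contents are fabricated):

```go
package main

import (
	"bytes"
	"fmt"
)

// probe mirrors the probeData table in ProbeFsType above.
type probe struct {
	fsName string
	magic  string
	offset int
}

// detect scans a superblock-sized buffer for known magic values.
func detect(buf []byte) (string, bool) {
	probes := []probe{
		{"btrfs", "_BHRfS_M", 0x10040},
		{"ext4", "\123\357", 0x438}, // 0x53 0xEF in octal escapes
		{"xfs", "XFSB", 0},
	}
	for _, p := range probes {
		end := p.offset + len(p.magic)
		if end <= len(buf) && bytes.Equal([]byte(p.magic), buf[p.offset:end]) {
			return p.fsName, true
		}
	}
	return "", false
}

func main() {
	// Fake a superblock: XFS puts its "XFSB" magic at offset 0.
	buf := make([]byte, 0x10048)
	copy(buf, "XFSB")
	if fs, ok := detect(buf); ok {
		fmt.Println("detected:", fs) // detected: xfs
	}
}
```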
+type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc + + // ErrNotSupported returned when driver is not supported. + ErrNotSupported = errors.New("driver not supported") + // ErrPrerequisites returned when driver does not meet prerequisites. + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + // ErrIncompatibleFS returned when file system is not supported. + ErrIncompatibleFS = errors.New("backing file system is unsupported for this graph driver") + // ErrLayerUnknown returned when the specified layer is unknown by the driver. + ErrLayerUnknown = errors.New("unknown layer") +) + +//CreateOpts contains optional arguments for Create() and CreateReadWrite() +// methods. +type CreateOpts struct { + MountLabel string + StorageOpt map[string]string + *idtools.IDMappings + ignoreChownErrors bool +} + +// MountOpts contains optional arguments for LayerStope.Mount() methods. +type MountOpts struct { + // Mount label is the MAC Labels to assign to mount point (SELINUX) + MountLabel string + // UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point + UidMaps []idtools.IDMap // nolint: golint + GidMaps []idtools.IDMap // nolint: golint + Options []string + + // Volatile specifies whether the container storage can be optimized + // at the cost of not syncing all the dirty files in memory. + Volatile bool + + // DisableShifting forces the driver to not do any ID shifting at runtime. + DisableShifting bool +} + +// ApplyDiffOpts contains optional arguments for ApplyDiff methods. +type ApplyDiffOpts struct { + Diff io.Reader + Mappings *idtools.IDMappings + MountLabel string + IgnoreChownErrors bool + ForceMask *os.FileMode +} + +// InitFunc initializes the storage driver. +type InitFunc func(homedir string, options Options) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container. Additional options can + // be passed in opts. parent may be "" and opts may be nil. + CreateReadWrite(id, parent string, opts *CreateOpts) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and options passed in opts. Parent + // may be "" and opts may be nil. + Create(id, parent string, opts *CreateOpts) error + // CreateFromTemplate creates a new filesystem layer with the specified id + // and parent, with contents identical to the specified template layer. + CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Optionally it gets the mappings used to create the layer. 
+ // Returns the absolute path to the mounted layered filesystem. + Get(id string, options MountOpts) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container driver is managing. + Metadata(id string) (map[string]string, error) + // ReadWriteDiskUsage returns the disk usage of the writable directory for the specified ID. + ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error + // AdditionalImageStores returns additional image stores supported by the driver + // This API is experimental and can be changed without bumping the major version number. + AdditionalImageStores() []string +} + +// DiffDriver is the interface to use to implement graph diffs +type DiffDriver interface { + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The io.Reader must be an uncompressed stream. + ApplyDiff(id string, parent string, options ApplyDiffOpts) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, mountLabel string) (size int64, err error) +} + +// LayerIDMapUpdater is the interface that implements ID map changes for layers. +type LayerIDMapUpdater interface { + // UpdateLayerIDMap walks the layer's filesystem tree, changing the ownership + // information using the toContainer and toHost mappings, using them to replace + // on-disk owner UIDs and GIDs which are "host" values in the first map with + // UIDs and GIDs for "host" values from the second map which correspond to the + // same "container" IDs. This method should only be called after a layer is + // first created and populated, and before it is mounted, as other changes made + // relative to a parent layer, but before this method is called, may be discarded + // by Diff(). + UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error + + // SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in a + // image and it is not required to Chown the files when running in an user namespace. 
+ SupportsShifting() bool
+}
+
+// Driver is the interface for layered/snapshot file system drivers.
+type Driver interface {
+ ProtoDriver
+ DiffDriver
+ LayerIDMapUpdater
+}
+
+// DriverWithDifferOutput is the result of ApplyDiffWithDiffer.
+// This API is experimental and can be changed without bumping the major version number.
+type DriverWithDifferOutput struct {
+ Differ Differ
+ Target string
+ Size int64
+ UIDs []uint32
+ GIDs []uint32
+ UncompressedDigest digest.Digest
+ Metadata string
+ BigData map[string][]byte
+}
+
+// Differ defines the interface for using a custom differ.
+// This API is experimental and can be changed without bumping the major version number.
+type Differ interface {
+ ApplyDiff(dest string, options *archive.TarOptions) (DriverWithDifferOutput, error)
+}
+
+// DriverWithDiffer is the interface for direct diff access.
+// This API is experimental and can be changed without bumping the major version number.
+type DriverWithDiffer interface {
+ Driver
+ // ApplyDiffWithDiffer applies the changes using the callback function.
+ // If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
+ ApplyDiffWithDiffer(id, parent string, options *ApplyDiffOpts, differ Differ) (output DriverWithDifferOutput, err error)
+ // ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
+ ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffOpts) error
+ // CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors.
+ CleanupStagingDirectory(stagingDirectory string) error
+ // DifferTarget gets the location where files are stored for the layer.
+ DifferTarget(id string) (string, error)
+}
+
+// Capabilities defines a list of capabilities a driver may implement.
+// These capabilities are not required; however, they do determine how a
+// graphdriver can be used.
+type Capabilities struct {
+ // Flags that this driver is capable of reproducing exactly equivalent
+ // diffs for read-only layers. If set, clients can rely on the driver
+ // for consistent tar streams, and avoid extra processing to account
+ // for potential differences (eg: the layer store's use of tar-split).
+ ReproducesExactDiffs bool
+}
+
+// CapabilityDriver is the interface for layered file system drivers that
+// can report on their Capabilities.
+type CapabilityDriver interface {
+ Capabilities() Capabilities
+}
+
+// AdditionalLayer represents a layer that is stored in the additional layer store.
+// This API is experimental and can be changed without bumping the major version number.
+type AdditionalLayer interface {
+ // CreateAs creates a new layer from this additional layer
+ CreateAs(id, parent string) error
+
+ // Info returns arbitrary information stored along with this layer (i.e. `info` file)
+ Info() (io.ReadCloser, error)
+
+ // Blob returns a reader of the raw contents of this layer.
+ Blob() (io.ReadCloser, error)
+
+ // Release tells the additional layer store that this handler is no longer used.
+ Release()
+}
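Client code discovers these optional interfaces at runtime with type assertions rather than depending on a concrete driver. A minimal sketch, assuming only the interfaces defined above (probeCapabilities is a hypothetical helper, and a real drv would come from the New/GetDriver functions defined later in this file):

package main

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

// probeCapabilities reports which optional graphdriver interfaces drv implements.
func probeCapabilities(drv graphdriver.Driver) {
	if cd, ok := drv.(graphdriver.CapabilityDriver); ok && cd.Capabilities().ReproducesExactDiffs {
		fmt.Println("driver reproduces exact diffs for read-only layers")
	}
	if _, ok := drv.(graphdriver.DriverWithDiffer); ok {
		fmt.Println("driver supports direct diff application (experimental)")
	}
}

func main() {
	var drv graphdriver.Driver // placeholder; nil assertions simply report false
	probeCapabilities(drv)
}

+// AdditionalLayerStoreDriver is the interface for drivers that support
+// additional layer store functionality.
+// This API is experimental and can be changed without bumping the major version number.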
+type AdditionalLayerStoreDriver interface { + Driver + + // LookupAdditionalLayer looks up additional layer store by the specified + // digest and ref and returns an object representing that layer. + LookupAdditionalLayer(d digest.Digest, ref string) (AdditionalLayer, error) + + // LookupAdditionalLayer looks up additional layer store by the specified + // ID and returns an object representing that layer. + LookupAdditionalLayerByID(id string) (AdditionalLayer, error) +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver. +func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name string, config Options) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(config.Root, name), config) + } + + logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root) + return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root) +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options Options) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home) +} + +// Options is used to initialize a graphdriver +type Options struct { + Root string + RunRoot string + DriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ExperimentalEnabled bool +} + +// New creates the driver and initializes it at the specified root. +func New(name string, config Options) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver + return GetDriver(name, config) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(config.Root) + for _, name := range priority { + if name == "vfs" { + // don't use vfs even if there is state present. + continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, config.Root, config) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. 
Otherwise the daemon's
+ // images would just "disappear".
+ logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
+ return nil, err
+ }
+
+ // abort starting when there are other prior configured drivers
+ // to ensure the user explicitly selects the driver to load
+ if len(driversMap)-1 > 0 {
+ var driversSlice []string
+ for name := range driversMap {
+ driversSlice = append(driversSlice, name)
+ }
+
+ return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
+ }
+
+ logrus.Infof("[graphdriver] using prior storage driver: %s", name)
+ return driver, nil
+ }
+ }
+
+ // Check for priority drivers first
+ for _, name := range priority {
+ driver, err := getBuiltinDriver(name, config.Root, config)
+ if err != nil {
+ if isDriverNotSupported(err) {
+ continue
+ }
+ return nil, err
+ }
+ return driver, nil
+ }
+
+ // Check all registered drivers if no priority driver is found
+ for name, initFunc := range drivers {
+ driver, err := initFunc(filepath.Join(config.Root, name), config)
+ if err != nil {
+ if isDriverNotSupported(err) {
+ continue
+ }
+ return nil, err
+ }
+ return driver, nil
+ }
+ return nil, fmt.Errorf("No supported storage backend found")
+}
+
+// isDriverNotSupported returns true if the error initializing
+// the graph driver is a non-supported error.
+func isDriverNotSupported(err error) bool {
+ cause := errors.Cause(err)
+ return cause == ErrNotSupported || cause == ErrPrerequisites || cause == ErrIncompatibleFS
+}
+
+// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
+func scanPriorDrivers(root string) map[string]bool {
+ driversMap := make(map[string]bool)
+
+ for driver := range drivers {
+ p := filepath.Join(root, driver)
+ if _, err := os.Stat(p); err == nil && driver != "vfs" {
+ driversMap[driver] = true
+ }
+ }
+ return driversMap
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
new file mode 100644
index 00000000000..143cccf92ec
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
@@ -0,0 +1,49 @@
+package graphdriver
+
+import (
+ "golang.org/x/sys/unix"
+
+ "github.com/containers/storage/pkg/mount"
+)
+
+const (
+ // FsMagicZfs filesystem id for Zfs
+ FsMagicZfs = FsMagic(0x2fc12fc1)
+)
+
+var (
+ // Slice of drivers that should be used in order
+ priority = []string{
+ "zfs",
+ "vfs",
+ }
+
+ // FsNames maps filesystem id to name of the filesystem.
+ FsNames = map[FsMagic]string{
+ FsMagicZfs: "zfs",
+ }
+)
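A short, self-contained sketch of how these per-platform checkers are consumed (the paths are placeholders, not anything the package requires):

package main

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

func main() {
	// IsMounted consults the platform's mount table for the exact path.
	checker := graphdriver.NewDefaultChecker()
	fmt.Println(checker.IsMounted("/var/lib/containers/storage/overlay"))

	// Mounted instead compares the statfs magic of whatever backs the path.
	ok, err := graphdriver.Mounted(graphdriver.FsMagicZfs, "/var/lib/containers/storage")
	fmt.Println(ok, err)
}

+// NewDefaultChecker returns a check that parses /proc/mountinfo to check
+// if the specified path is mounted.
+// No-op on FreeBSD.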
+func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf unix.Statfs_t + if err := unix.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go new file mode 100644 index 00000000000..0fe3eea7ae6 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver_linux.go @@ -0,0 +1,172 @@ +// +build linux + +package graphdriver + +import ( + "path/filepath" + + "github.com/containers/storage/pkg/mount" + "golang.org/x/sys/unix" +) + +const ( + // FsMagicAufs filesystem id for Aufs + FsMagicAufs = FsMagic(0x61756673) + // FsMagicBtrfs filesystem id for Btrfs + FsMagicBtrfs = FsMagic(0x9123683E) + // FsMagicCramfs filesystem id for Cramfs + FsMagicCramfs = FsMagic(0x28cd3d45) + // FsMagicEcryptfs filesystem id for eCryptfs + FsMagicEcryptfs = FsMagic(0xf15f) + // FsMagicExtfs filesystem id for Extfs + FsMagicExtfs = FsMagic(0x0000EF53) + // FsMagicF2fs filesystem id for F2fs + FsMagicF2fs = FsMagic(0xF2F52010) + // FsMagicGPFS filesystem id for GPFS + FsMagicGPFS = FsMagic(0x47504653) + // FsMagicJffs2Fs filesystem if for Jffs2Fs + FsMagicJffs2Fs = FsMagic(0x000072b6) + // FsMagicJfs filesystem id for Jfs + FsMagicJfs = FsMagic(0x3153464a) + // FsMagicNfsFs filesystem id for NfsFs + FsMagicNfsFs = FsMagic(0x00006969) + // FsMagicRAMFs filesystem id for RamFs + FsMagicRAMFs = FsMagic(0x858458f6) + // FsMagicReiserFs filesystem id for ReiserFs + FsMagicReiserFs = FsMagic(0x52654973) + // FsMagicSmbFs filesystem id for SmbFs + FsMagicSmbFs = FsMagic(0x0000517B) + // FsMagicSquashFs filesystem id for SquashFs + FsMagicSquashFs = FsMagic(0x73717368) + // FsMagicTmpFs filesystem id for TmpFs + FsMagicTmpFs = FsMagic(0x01021994) + // FsMagicVxFS filesystem id for VxFs + FsMagicVxFS = FsMagic(0xa501fcf5) + // FsMagicXfs filesystem id for Xfs + FsMagicXfs = FsMagic(0x58465342) + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) + // FsMagicOverlay filesystem id for overlay + FsMagicOverlay = FsMagic(0x794C7630) + // FsMagicFUSE filesystem id for FUSE + FsMagicFUSE = FsMagic(0x65735546) + // FsMagicAcfs filesystem id for Acfs + FsMagicAcfs = FsMagic(0x61636673) + // FsMagicAfs filesystem id for Afs + FsMagicAfs = FsMagic(0x5346414f) + // FsMagicCephFs filesystem id for Ceph + FsMagicCephFs = FsMagic(0x00C36400) + // FsMagicCIFS filesystem id for CIFS + FsMagicCIFS = FsMagic(0xFF534D42) + // FsMagicFHGFS filesystem id for FHGFS + FsMagicFHGFSFs = FsMagic(0x19830326) + // FsMagicIBRIX filesystem id for IBRIX + FsMagicIBRIX = FsMagic(0x013111A8) + // FsMagicKAFS filesystem id for KAFS + FsMagicKAFS = FsMagic(0x6B414653) + // FsMagicLUSTRE filesystem id for LUSTRE + FsMagicLUSTRE = FsMagic(0x0BD00BD0) + // FsMagicNCP filesystem id for NCP + FsMagicNCP = FsMagic(0x564C) + // FsMagicNFSD filesystem id for NFSD + FsMagicNFSD = FsMagic(0x6E667364) + // FsMagicOCFS2 filesystem id for OCFS2 + FsMagicOCFS2 = FsMagic(0x7461636F) + // FsMagicPANFS filesystem id for PANFS + FsMagicPANFS = FsMagic(0xAAD7AAEA) + // FsMagicPRLFS filesystem id for PRLFS + FsMagicPRLFS = FsMagic(0x7C7C6673) + // FsMagicSMB2 filesystem id for SMB2 
+ FsMagicSMB2 = FsMagic(0xFE534D42)
+ // FsMagicSNFS filesystem id for SNFS
+ FsMagicSNFS = FsMagic(0xBEEFDEAD)
+ // FsMagicVBOXSF filesystem id for VBOXSF
+ FsMagicVBOXSF = FsMagic(0x786F4256)
+ // FsMagicVXFS filesystem id for VXFS
+ FsMagicVXFS = FsMagic(0xA501FCF5)
+)
+
+var (
+ // Slice of drivers that should be used in order
+ priority = []string{
+ "overlay",
+ // We don't support devicemapper without configuration
+ // "devicemapper",
+ "aufs",
+ "btrfs",
+ "zfs",
+ "vfs",
+ }
+
+ // FsNames maps filesystem id to name of the filesystem.
+ FsNames = map[FsMagic]string{
+ FsMagicAufs: "aufs",
+ FsMagicBtrfs: "btrfs",
+ FsMagicCramfs: "cramfs",
+ FsMagicEcryptfs: "ecryptfs",
+ FsMagicExtfs: "extfs",
+ FsMagicF2fs: "f2fs",
+ FsMagicGPFS: "gpfs",
+ FsMagicJffs2Fs: "jffs2",
+ FsMagicJfs: "jfs",
+ FsMagicNfsFs: "nfs",
+ FsMagicOverlay: "overlayfs",
+ FsMagicRAMFs: "ramfs",
+ FsMagicReiserFs: "reiserfs",
+ FsMagicSmbFs: "smb",
+ FsMagicSquashFs: "squashfs",
+ FsMagicTmpFs: "tmpfs",
+ FsMagicUnsupported: "unsupported",
+ FsMagicVxFS: "vxfs",
+ FsMagicXfs: "xfs",
+ FsMagicZfs: "zfs",
+ }
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+ var buf unix.Statfs_t
+ if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+ return 0, err
+ }
+ return FsMagic(buf.Type), nil
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+ return &fsChecker{
+ t: t,
+ }
+}
+
+type fsChecker struct {
+ t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+ m, _ := Mounted(c.t, path)
+ return m
+}
+
+// NewDefaultChecker returns a check that parses /proc/mountinfo to check
+// if the specified path is mounted.
+func NewDefaultChecker() Checker {
+ return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+ m, _ := mount.Mounted(path)
+ return m
+}
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+ var buf unix.Statfs_t
+ if err := unix.Statfs(mountPath, &buf); err != nil {
+ return false, err
+ }
+ return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go
new file mode 100644
index 00000000000..174fa9670bf
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/driver_solaris.go
@@ -0,0 +1,96 @@
+// +build solaris,cgo
+
+package graphdriver
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+ struct statvfs *buf;
+ int err;
+ buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+ err = statvfs(s, buf);
+ return buf;
+}
+*/
+import "C"
+import (
+ "path/filepath"
+ "unsafe"
+
+ "github.com/containers/storage/pkg/mount"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ // FsMagicZfs filesystem id for Zfs
+ FsMagicZfs = FsMagic(0x2fc12fc1)
+)
+
+var (
+ // Slice of drivers that should be used in order
+ priority = []string{
+ "zfs",
+ }
+
+ // FsNames maps filesystem id to name of the filesystem.
+ FsNames = map[FsMagic]string{
+ FsMagicZfs: "zfs",
+ }
+)
+
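This magic-to-name mapping is how callers identify the backing filesystem of a store; a brief sketch (the store path is a common default, not a requirement):

package main

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

func main() {
	// Resolve the magic number of the filesystem backing the store root,
	// then translate it to a human-readable name via FsNames.
	magic, err := graphdriver.GetFSMagic("/var/lib/containers/storage")
	if err != nil {
		panic(err)
	}
	name, ok := graphdriver.FsNames[magic]
	if !ok {
		name = "unknown"
	}
	fmt.Printf("backing filesystem: %s (0x%x)\n", name, uint32(magic))
}

+// GetFSMagic returns the filesystem id given the path.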
+func GetFSMagic(rootpath string) (FsMagic, error) { + return 0, nil +} + +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewFsChecker returns a checker configured for the provided FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +// No-op on Solaris. +func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + +// Mounted checks if the given path is mounted as the fs type +//Solaris supports only ZFS for now +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + + cs := C.CString(filepath.Dir(mountPath)) + defer C.free(unsafe.Pointer(cs)) + buf := C.getstatfs(cs) + defer C.free(unsafe.Pointer(buf)) + + // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ] + if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || + (buf.f_basetype[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) + return false, ErrPrerequisites + } + + return true, nil +} diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go new file mode 100644 index 00000000000..4a875608b0d --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux,!windows,!freebsd,!solaris + +package graphdriver + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "unsupported", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/containers/storage/drivers/driver_windows.go b/vendor/github.com/containers/storage/drivers/driver_windows.go new file mode 100644 index 00000000000..ffd30c2950c --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/driver_windows.go @@ -0,0 +1,14 @@ +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "windowsfilter", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go new file mode 100644 index 00000000000..b7e681ace45 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -0,0 +1,220 @@ +package graphdriver + +import ( + "io" + "time" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/opencontainers/runc/libcontainer/userns" + "github.com/sirupsen/logrus" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. 
See the comment on the exported +// NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. +type NaiveDiffDriver struct { + ProtoDriver + LayerIDMapUpdater +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) +// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) +// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error) +// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, updater LayerIDMapUpdater) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, LayerIDMapUpdater: updater} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + if parentMappings == nil { + parentMappings = &idtools.IDMappings{} + } + + options := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.TarWithOptions(layerFs, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + }) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + options.Options = append(options.Options, "ro") + parentFs, err := driver.Get(parent, options) + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, idMappings.UIDs(), idMappings.GIDs()) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tar's with full second precision on modified time. + // We need this hack here to make sure calls within same second receive + // correct result. + time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. 
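Before the Changes implementation that follows, a sketch of how a caller consumes the wrapped driver's Diff stream (exportLayer and its package are hypothetical; passing nil mappings is safe because Diff substitutes empty IDMappings, as shown above):

package layertools

import (
	"io"

	graphdriver "github.com/containers/storage/drivers"
)

// exportLayer streams the uncompressed tar of a layer's changes against its
// parent into w; drv may be any Driver built with NewNaiveDiffDriver.
func exportLayer(drv graphdriver.Driver, id, parent string, w io.Writer) error {
	rc, err := drv.Diff(id, nil, parent, nil, "")
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(w, rc)
	return err
}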
+func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + if parentMappings == nil { + parentMappings = &idtools.IDMappings{} + } + + options := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + options := MountOpts{ + MountLabel: mountLabel, + Options: []string{"ro"}, + } + parentFs, err = driver.Get(parent, options) + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, idMappings, parentFs, parentMappings) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error) { + driver := gdw.ProtoDriver + + if options.Mappings == nil { + options.Mappings = &idtools.IDMappings{} + } + + // Mount the root filesystem so we can apply the diff/layer. + mountOpts := MountOpts{ + MountLabel: options.MountLabel, + } + layerFs, err := driver.Get(id, mountOpts) + if err != nil { + return + } + defer driver.Put(id) + + tarOptions := &archive.TarOptions{ + InUserNS: userns.RunningInUserNS(), + IgnoreChownErrors: options.IgnoreChownErrors, + } + if options.Mappings != nil { + tarOptions.UIDMaps = options.Mappings.UIDs() + tarOptions.GIDMaps = options.Mappings.GIDs() + } + start := time.Now().UTC() + logrus.Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, options.Diff, tarOptions); err != nil { + logrus.Errorf("While applying layer: %s", err) + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
+func (gdw *NaiveDiffDriver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + driver := gdw.ProtoDriver + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + if parentMappings == nil { + parentMappings = &idtools.IDMappings{} + } + + changes, err := gdw.Changes(id, idMappings, parent, parentMappings, mountLabel) + if err != nil { + return + } + + options := MountOpts{ + MountLabel: mountLabel, + } + layerFs, err := driver.Get(id, options) + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} diff --git a/vendor/github.com/containers/storage/drivers/jsoniter.go b/vendor/github.com/containers/storage/drivers/jsoniter.go new file mode 100644 index 00000000000..097f923ab5b --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/jsoniter.go @@ -0,0 +1,5 @@ +package graphdriver + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go new file mode 100644 index 00000000000..48fb7a550fa --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -0,0 +1,274 @@ +//go:build linux +// +build linux + +package overlay + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/pkg/unshare" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// doesSupportNativeDiff checks whether the filesystem has a bug +// which copies up the opaque flag when copying up an opaque +// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR. +// When these exist naive diff should be used. 
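The probe below, like the others in this file, exercises a real overlay mount; for orientation, this is the shape of the option string it assembles (a standalone sketch with illustrative paths):

package main

import "fmt"

func main() {
	// Lower layers are listed uppermost first and separated by ":";
	// the upper dir receives copy-ups and the work dir must be an
	// empty directory on the same filesystem as the upper dir.
	opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s",
		"/tmp/check/l2", "/tmp/check/l1", "/tmp/check/l3", "/tmp/check/work")
	fmt.Println(opts)
	// Equivalent CLI: mount -t overlay overlay -o "$opts" /tmp/check/merged
}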
+func doesSupportNativeDiff(d, mountOpts string) error { + td, err := ioutil.TempDir(d, "opaque-bug-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1/d, l1/d1, l2/d, l3, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return err + } + + // Mark l2/d as opaque + if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), archive.GetOverlayXattrName("opaque"), []byte("y"), 0); err != nil { + return errors.Wrap(err, "failed to set opaque flag on middle layer") + } + + mountFlags := "lowerdir=%s:%s,upperdir=%s,workdir=%s" + if unshare.IsRootless() { + mountFlags = mountFlags + ",userxattr" + } + + opts := fmt.Sprintf(mountFlags, path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) + flags, data := mount.ParseOptions(mountOpts) + if data != "" { + opts = fmt.Sprintf("%s,%s", opts, data) + } + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil { + return errors.Wrap(err, "failed to mount overlay") + } + defer func() { + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + + // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" + if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + return errors.Wrap(err, "failed to write to merged directory") + } + + // Check l3/d does not have opaque flag + xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), archive.GetOverlayXattrName("opaque")) + if err != nil { + return errors.Wrap(err, "failed to read opaque flag on upper layer") + } + if string(xattrOpaque) == "y" { + return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") + } + + // rename "d1" to "d2" + if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil { + // if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled + if err.(*os.LinkError).Err == syscall.EXDEV { + return nil + } + return errors.Wrap(err, "failed to rename dir in merged directory") + } + // get the xattr of "d2" + xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), archive.GetOverlayXattrName("redirect")) + if err != nil { + return errors.Wrap(err, "failed to read redirect flag on upper layer") + } + + if string(xattrRedirect) == "d1" { + return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled") + } + + return nil +} + +// doesMetacopy checks if the filesystem is going to optimize changes to +// metadata by using nodes marked with an "overlay.metacopy" attribute to avoid +// copying up a file from a lower layer unless/until its contents are being +// modified +func doesMetacopy(d, mountOpts string) (bool, error) { + td, err := ioutil.TempDir(d, "metacopy-check") 
+ if err != nil { + return false, err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1, l2, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1"), 0755); err != nil { + return false, err + } + if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0700); err != nil { + return false, err + } + if err := os.MkdirAll(filepath.Join(td, "l2"), 0755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return false, err + } + // Mount using the mandatory options and configured options + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "l1"), path.Join(td, "l2"), path.Join(td, "work")) + if unshare.IsRootless() { + opts = fmt.Sprintf("%s,userxattr", opts) + } + flags, data := mount.ParseOptions(mountOpts) + if data != "" { + opts = fmt.Sprintf("%s,%s", opts, data) + } + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil { + if errors.Cause(err) == unix.EINVAL { + logrus.Info("metacopy option not supported on this kernel", mountOpts) + return false, nil + } + return false, errors.Wrapf(err, "failed to mount overlay for metacopy check with %q options", mountOpts) + } + defer func() { + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + // Make a change that only impacts the inode, and check if the pulled-up copy is marked + // as a metadata-only copy + if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil { + return false, errors.Wrap(err, "error changing permissions on file for metacopy check") + } + metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), archive.GetOverlayXattrName("metacopy")) + if err != nil { + if errors.Is(err, unix.ENOTSUP) { + logrus.Info("metacopy option not supported") + return false, nil + } + return false, errors.Wrap(err, "metacopy flag was not set on file in upper layer") + } + return metacopy != nil, nil +} + +// doesVolatile checks if the filesystem supports the "volatile" mount option +func doesVolatile(d string) (bool, error) { + td, err := ioutil.TempDir(d, "volatile-check") + if err != nil { + return false, err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + if err := os.MkdirAll(filepath.Join(td, "lower"), 0755); err != nil { + return false, err + } + if err := os.MkdirAll(filepath.Join(td, "upper"), 0755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return false, err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return false, err + } + // Mount using the mandatory options and configured options + opts := fmt.Sprintf("volatile,lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "lower"), path.Join(td, "upper"), path.Join(td, "work")) + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { + return false, errors.Wrapf(err, "failed to mount overlay for volatile check") + } + defer func() { + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", 
filepath.Join(td, "merged"), err) + } + }() + return true, nil +} + +// supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of +// a idmapped lower layer. +func supportsIdmappedLowerLayers(home string) (bool, error) { + layerDir, err := ioutil.TempDir(home, "compat") + if err != nil { + return false, err + } + defer func() { + _ = os.RemoveAll(layerDir) + }() + + mergedDir := filepath.Join(layerDir, "merged") + lowerDir := filepath.Join(layerDir, "lower") + lowerMappedDir := filepath.Join(layerDir, "lower-mapped") + upperDir := filepath.Join(layerDir, "upper") + workDir := filepath.Join(layerDir, "work") + + _ = idtools.MkdirAs(mergedDir, 0700, 0, 0) + _ = idtools.MkdirAs(lowerDir, 0700, 0, 0) + _ = idtools.MkdirAs(lowerMappedDir, 0700, 0, 0) + _ = idtools.MkdirAs(upperDir, 0700, 0, 0) + _ = idtools.MkdirAs(workDir, 0700, 0, 0) + + idmap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: 0, + Size: 1, + }, + } + pid, cleanupFunc, err := createUsernsProcess(idmap, idmap) + if err != nil { + return false, err + } + defer cleanupFunc() + + if err := createIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil { + return false, errors.Wrapf(err, "create mapped mount") + } + defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH) + + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir) + flags := uintptr(0) + if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil { + return false, err + } + defer func() { + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + }() + return true, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_116.go b/vendor/github.com/containers/storage/drivers/overlay/check_116.go new file mode 100644 index 00000000000..6d7913cbfab --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/check_116.go @@ -0,0 +1,42 @@ +// +build go1.16 + +package overlay + +import ( + "io/fs" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/system" +) + +func scanForMountProgramIndicators(home string) (detected bool, err error) { + err = filepath.WalkDir(home, func(path string, d fs.DirEntry, err error) error { + if detected { + return fs.SkipDir + } + if err != nil { + return err + } + basename := filepath.Base(path) + if strings.HasPrefix(basename, archive.WhiteoutPrefix) { + detected = true + return fs.SkipDir + } + if d.IsDir() { + xattrs, err := system.Llistxattr(path) + if err != nil { + return err + } + for _, xattr := range xattrs { + if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") { + detected = true + return fs.SkipDir + } + } + } + return nil + }) + return detected, err +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go new file mode 100644 index 00000000000..2af33a6fce0 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go @@ -0,0 +1,160 @@ +//go:build linux +// +build linux + +package overlay + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +type attr struct { + attrSet uint64 + attrClr uint64 + propagation uint64 + userNs uint64 +} + +const ( + // _MOUNT_ATTR_IDMAP - Idmap mount to @userns_fd in struct mount_attr + _MOUNT_ATTR_IDMAP = 0x00100000 //nolint:golint + + // 
_OPEN_TREE_CLONE - Clone the source path mount
+ _OPEN_TREE_CLONE = 0x00000001 //nolint:golint
+
+ // _MOVE_MOUNT_F_EMPTY_PATH - Move the path referenced by the fd
+ _MOVE_MOUNT_F_EMPTY_PATH = 0x00000004 //nolint:golint
+)
+
+// openTree is a wrapper for the open_tree syscall
+func openTree(path string, flags int) (fd int, err error) {
+ var _p0 *byte
+
+ if _p0, err = syscall.BytePtrFromString(path); err != nil {
+ return 0, err
+ }
+
+ r, _, e1 := syscall.Syscall6(uintptr(unix.SYS_OPEN_TREE), uintptr(0), uintptr(unsafe.Pointer(_p0)),
+ uintptr(flags), 0, 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return int(r), err
+}
+
+// moveMount is a wrapper for the move_mount syscall.
+func moveMount(fdTree int, target string) (err error) {
+ var _p0, _p1 *byte
+
+ empty := ""
+
+ if _p0, err = syscall.BytePtrFromString(target); err != nil {
+ return err
+ }
+ if _p1, err = syscall.BytePtrFromString(empty); err != nil {
+ return err
+ }
+
+ flags := _MOVE_MOUNT_F_EMPTY_PATH
+
+ _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT),
+ uintptr(fdTree), uintptr(unsafe.Pointer(_p1)),
+ 0, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// mountSetAttr is a wrapper for the mount_setattr syscall
+func mountSetAttr(dfd int, path string, flags uint, attr *attr, size uint) (err error) {
+ var _p0 *byte
+
+ if _p0, err = syscall.BytePtrFromString(path); err != nil {
+ return err
+ }
+
+ _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOUNT_SETATTR), uintptr(dfd), uintptr(unsafe.Pointer(_p0)),
+ uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// createIDMappedMount creates an idmapped bind mount from SOURCE to TARGET using the user namespace
+// of the process with the given PID.
+func createIDMappedMount(source, target string, pid int) error {
+ path := fmt.Sprintf("/proc/%d/ns/user", pid)
+ userNsFile, err := os.Open(path)
+ if err != nil {
+ return errors.Wrapf(err, "unable to get user ns file descriptor for %q", path)
+ }
+
+ var attr attr
+ attr.attrSet = _MOUNT_ATTR_IDMAP
+ attr.attrClr = 0
+ attr.propagation = 0
+ attr.userNs = uint64(userNsFile.Fd())
+
+ defer userNsFile.Close()
+
+ targetDirFd, err := openTree(source, _OPEN_TREE_CLONE|unix.AT_RECURSIVE)
+ if err != nil {
+ return err
+ }
+ defer unix.Close(targetDirFd)
+
+ if err := mountSetAttr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE,
+ &attr, uint(unsafe.Sizeof(attr))); err != nil {
+ return err
+ }
+ if err := os.Mkdir(target, 0700); err != nil && !os.IsExist(err) {
+ return err
+ }
+ return moveMount(targetDirFd, target)
+}
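Put together, the helpers above implement the sequence used by supportsIdmappedLowerLayers in check.go; a condensed sketch, assuming it sits in this package next to those helpers and alongside its imports (paths are placeholders):

// idmappedViewSketch maps host ID 0 to container ID 0, forks a child that
// holds the user namespace, and remounts lowerDir as an idmapped view.
func idmappedViewSketch(lowerDir, mappedDir string) error {
	idmap := []idtools.IDMap{{ContainerID: 0, HostID: 0, Size: 1}}
	pid, cleanup, err := createUsernsProcess(idmap, idmap)
	if err != nil {
		return err
	}
	defer cleanup()
	return createIDMappedMount(lowerDir, mappedDir, pid)
}

+// createUsernsProcess forks the current process and creates a user namespace using the specified
+// mappings. It returns the pid of the new process.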
+func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) { + pid, _, err := syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0) + if err != 0 { + return -1, nil, err + } + if pid == 0 { + _ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0) + // just wait for the SIGKILL + for { + syscall.Pause() + } + } + cleanupFunc := func() { + unix.Kill(int(pid), unix.SIGKILL) + _, _ = unix.Wait4(int(pid), nil, 0, nil) + } + writeMappings := func(fname string, idmap []idtools.IDMap) error { + mappings := "" + for _, m := range idmap { + mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + return ioutil.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600) + } + if err := writeMappings("uid_map", uidMaps); err != nil { + cleanupFunc() + return -1, nil, err + } + if err := writeMappings("gid_map", gidMaps); err != nil { + cleanupFunc() + return -1, nil, err + } + + return int(pid), cleanupFunc, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go new file mode 100644 index 00000000000..2a1e9d0cc1a --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go @@ -0,0 +1,5 @@ +package overlay + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go new file mode 100644 index 00000000000..7c8fd50a3a5 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/mount.go @@ -0,0 +1,88 @@ +// +build linux + +package overlay + +import ( + "bytes" + "flag" + "fmt" + "os" + "runtime" + + "github.com/containers/storage/pkg/reexec" + "golang.org/x/sys/unix" +) + +func init() { + reexec.Register("storage-mountfrom", mountFromMain) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +type mountOptions struct { + Device string + Target string + Type string + Label string + Flag uint32 +} + +func mountFrom(dir, device, target, mType string, flags uintptr, label string) error { + options := &mountOptions{ + Device: device, + Target: target, + Type: mType, + Flag: uint32(flags), + Label: label, + } + + cmd := reexec.Command("storage-mountfrom", dir) + w, err := cmd.StdinPipe() + if err != nil { + return fmt.Errorf("mountfrom error on pipe creation: %v", err) + } + + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + if err := cmd.Start(); err != nil { + w.Close() + return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + w.Close() + return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output) + } + return nil +} + +// mountfromMain is the entry-point for storage-mountfrom on re-exec. 
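The entry point below only runs if the embedding binary hands control to the reexec package first; a minimal sketch of that conventional wiring (the program itself is hypothetical):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/reexec"
)

func main() {
	// When the process was spawned under a registered name such as
	// "storage-mountfrom", reexec.Init runs that entry point and returns true.
	if reexec.Init() {
		return
	}
	fmt.Println("normal program logic runs here")
}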
+func mountFromMain() { + runtime.LockOSThread() + flag.Parse() + + var options *mountOptions + + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + if err := os.Chdir(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + fatal(err) + } + + os.Exit(0) +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go new file mode 100644 index 00000000000..09d24ae8b0d --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -0,0 +1,2261 @@ +//go:build linux +// +build linux + +package overlay + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/drivers/overlayutils" + "github.com/containers/storage/drivers/quota" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fsutils" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/pkg/unshare" + units "github.com/docker/go-units" + "github.com/hashicorp/go-multierror" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/runc/libcontainer/userns" + "github.com/opencontainers/selinux/go-selinux" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +var ( + // untar defines the untar method + untar = chrootarchive.UntarUncompressed +) + +const ( + defaultPerms = os.FileMode(0555) + selinuxLabelTest = "system_u:object_r:container_file_t:s0" +) + +// This backend uses the overlay union filesystem for containers +// with diff directories for each layer. + +// This version of the overlay driver requires at least kernel +// 4.0.0 in order to support mounting multiple diff directories. + +// Each container/image has at least a "diff" directory and "link" file. +// If there is also a "lower" file when there are diff layers +// below as well as "merged" and "work" directories. The "diff" directory +// has the upper layer of the overlay and is used to capture any +// changes to the layer. The "lower" file contains all the lower layer +// mounts separated by ":" and ordered from uppermost to lowermost +// layers. The overlay itself is mounted in the "merged" directory, +// and the "work" dir is needed for overlay to work. + +// The "link" file for each layer contains a unique string for the layer. +// Under the "l" directory at the root there will be a symbolic link +// with that unique string pointing the "diff" directory for the layer. +// The symbolic links are used to reference lower layers in the "lower" +// file and on mount. The links are used to shorten the total length +// of a layer reference without requiring changes to the layer identifier +// or root directory. Mounts are always done relative to root and +// referencing the symbolic links in order to ensure the number of +// lower directories can fit in a single page for making the mount +// syscall. 
A hard upper limit of 128 lower layers is enforced to ensure
+// that mounts do not fail due to length.
+
+const (
+ linkDir = "l"
+ lowerFile = "lower"
+ maxDepth = 128
+
+ // idLength represents the number of random characters
+ // which can be used to create the unique link identifier
+ // for every layer. If this value is too long then the
+ // page size limit for the mount command may be exceeded.
+ // The idLength should be selected such that following equation
+ // is true (512 is a buffer for label metadata).
+ // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512)
+ // With 4096-byte pages this bound is exact: (26 + 1 + 1) * 128 = 3584 = 4096 - 512.
+ idLength = 26
+)
+
+type overlayOptions struct {
+ imageStores []string
+ layerStores []additionalLayerStore
+ quota quota.Quota
+ mountProgram string
+ skipMountHome bool
+ mountOptions string
+ ignoreChownErrors bool
+ forceMask *os.FileMode
+}
+
+// Driver contains information about the home directory and the list of active mounts that are created using this driver.
+type Driver struct {
+ name string
+ home string
+ runhome string
+ uidMaps []idtools.IDMap
+ gidMaps []idtools.IDMap
+ ctr *graphdriver.RefCounter
+ quotaCtl *quota.Control
+ options overlayOptions
+ naiveDiff graphdriver.DiffDriver
+ supportsDType bool
+ supportsVolatile *bool
+ usingMetacopy bool
+
+ supportsIDMappedMounts *bool
+}
+
+type additionalLayerStore struct {
+
+ // path is the directory where this store is available on the host.
+ path string
+
+ // withReference is true when the store contains image reference information (base64-encoded)
+ // in its layer search path so the path to the diff will be
+ // <path>/base64(reference)/<layerdigest>/
+ withReference bool
+}
+
+var (
+ backingFs = "<unknown>"
+ projectQuotaSupported = false
+
+ useNaiveDiffLock sync.Once
+ useNaiveDiffOnly bool
+)
+
+func init() {
+ graphdriver.Register("overlay", Init)
+ graphdriver.Register("overlay2", Init)
+}
+
+func hasMetacopyOption(opts []string) bool {
+ for _, s := range opts {
+ if s == "metacopy=on" {
+ return true
+ }
+ }
+ return false
+}
+
+func stripOption(opts []string, option string) []string {
+ for i, s := range opts {
+ if s == option {
+ return stripOption(append(opts[:i], opts[i+1:]...), option)
+ }
+ }
+ return opts
+}
+
+func hasVolatileOption(opts []string) bool {
+ for _, s := range opts {
+ if s == "volatile" {
+ return true
+ }
+ }
+ return false
+}
+
+func getMountProgramFlagFile(path string) string {
+ return filepath.Join(path, ".has-mount-program")
+}
+
+func checkSupportVolatile(home, runhome string) (bool, error) {
+ feature := "volatile"
+ volatileCacheResult, _, err := cachedFeatureCheck(runhome, feature)
+ var usingVolatile bool
+ if err == nil {
+ if volatileCacheResult {
+ logrus.Debugf("Cached value indicated that volatile is being used")
+ } else {
+ logrus.Debugf("Cached value indicated that volatile is not being used")
+ }
+ usingVolatile = volatileCacheResult
+ } else {
+ usingVolatile, err = doesVolatile(home)
+ if err == nil {
+ if usingVolatile {
+ logrus.Debugf("overlay: test mount indicated that volatile is being used")
+ } else {
+ logrus.Debugf("overlay: test mount indicated that volatile is not being used")
+ }
+ if err = cachedFeatureRecord(runhome, feature, usingVolatile, ""); err != nil {
+ return false, errors.Wrap(err, "recording volatile-being-used status")
+ }
+ }
+ }
+ return usingVolatile, nil
+}
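Each feature probe in this file repeats the same check-cache-record dance; a hedged condensation (probeOnce is hypothetical, while cachedFeatureCheck and cachedFeatureRecord are the helpers defined later in this file):

// probeOnce returns the cached result for feature when one exists under
// runhome, otherwise runs probe and records its outcome for next time.
func probeOnce(runhome, feature string, probe func() (bool, error)) (bool, error) {
	if cached, _, err := cachedFeatureCheck(runhome, feature); err == nil {
		return cached, nil
	}
	supported, err := probe()
	if err != nil {
		return false, err
	}
	if err := cachedFeatureRecord(runhome, feature, supported, ""); err != nil {
		return false, err
	}
	return supported, nil
}

+// checkAndRecordIDMappedSupport checks and stores if the kernel supports mounting overlay on top of an
+// idmapped lower layer.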
+func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) {
+ feature := "idmapped-lower-dir"
+ overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
+ if err == nil {
+ if overlayCacheResult {
+ logrus.Debugf("Cached value indicated that overlay is supported")
+ return true, nil
+ }
+ logrus.Debugf("Cached value indicated that overlay is not supported")
+ return false, errors.New(overlayCacheText)
+ }
+ supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home)
+ if err2 := cachedFeatureRecord(runhome, feature, supportsIDMappedMounts, ""); err2 != nil {
+ return false, errors.Wrap(err2, "recording overlay idmapped mounts support status")
+ }
+ return supportsIDMappedMounts, err
+}
+
+func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome string) (bool, error) {
+ var supportsDType bool
+
+ if os.Geteuid() != 0 {
+ return false, nil
+ }
+
+ feature := "overlay"
+ overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
+ if err == nil {
+ if overlayCacheResult {
+ logrus.Debugf("Cached value indicated that overlay is supported")
+ } else {
+ logrus.Debugf("Cached value indicated that overlay is not supported")
+ }
+ supportsDType = overlayCacheResult
+ if !supportsDType {
+ return false, errors.New(overlayCacheText)
+ }
+ } else {
+ supportsDType, err = supportsOverlay(home, fsMagic, 0, 0)
+ if err != nil {
+ os.Remove(filepath.Join(home, linkDir))
+ os.Remove(home)
+ patherr, ok := err.(*os.PathError)
+ if ok && patherr.Err == syscall.ENOSPC {
+ return false, err
+ }
+ err = errors.Wrap(err, "kernel does not support overlay fs")
+ if err2 := cachedFeatureRecord(runhome, feature, false, err.Error()); err2 != nil {
+ return false, errors.Wrapf(err2, "recording overlay not being supported (%v)", err)
+ }
+ return false, err
+ }
+ if err = cachedFeatureRecord(runhome, feature, supportsDType, ""); err != nil {
+ return false, errors.Wrap(err, "recording overlay support status")
+ }
+ }
+ return supportsDType, nil
+}
+
+func (d *Driver) getSupportsVolatile() (bool, error) {
+ if d.supportsVolatile != nil {
+ return *d.supportsVolatile, nil
+ }
+ supportsVolatile, err := checkSupportVolatile(d.home, d.runhome)
+ if err != nil {
+ return false, err
+ }
+ d.supportsVolatile = &supportsVolatile
+ return supportsVolatile, nil
+}
+
+// isNetworkFileSystem checks if the specified file system is supported by native overlay
+// as backing store when running in a user namespace.
+func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
+ switch fsMagic {
+ // a bunch of network file systems...
+ case graphdriver.FsMagicNfsFs, graphdriver.FsMagicSmbFs, graphdriver.FsMagicAcfs,
+ graphdriver.FsMagicAfs, graphdriver.FsMagicCephFs, graphdriver.FsMagicCIFS,
+ graphdriver.FsMagicFHGFSFs, graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX,
+ graphdriver.FsMagicKAFS, graphdriver.FsMagicLUSTRE, graphdriver.FsMagicNCP,
+ graphdriver.FsMagicNFSD, graphdriver.FsMagicOCFS2, graphdriver.FsMagicPANFS,
+ graphdriver.FsMagicPRLFS, graphdriver.FsMagicSMB2, graphdriver.FsMagicSNFS,
+ graphdriver.FsMagicVBOXSF, graphdriver.FsMagicVXFS:
+ return true
+ }
+ return false
+}
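For context, a sketch of how a caller reaches this Init through the generic registry (paths and the mountopt value are illustrative; the blank import triggers the init() registration shown above):

package main

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
	_ "github.com/containers/storage/drivers/overlay" // registers "overlay" and "overlay2"
)

func main() {
	drv, err := graphdriver.GetDriver("overlay", graphdriver.Options{
		Root:          "/var/lib/containers/storage",
		RunRoot:       "/run/containers/storage",
		DriverOptions: []string{"overlay.mountopt=nodev"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(drv.String())
}

+// Init returns a native diff driver for the overlay filesystem.
+// If the overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
+// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.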
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { + opts, err := parseOptions(options.DriverOptions) + if err != nil { + return nil, err + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + runhome := filepath.Join(options.RunRoot, filepath.Base(home)) + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return nil, err + } + + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if opts.mountProgram == "" { + if supported, err := SupportsNativeOverlay(home, runhome); err != nil { + return nil, err + } else if !supported { + if path, err := exec.LookPath("fuse-overlayfs"); err == nil { + opts.mountProgram = path + } + } + } + + if opts.mountProgram != "" { + if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil { + m := os.FileMode(0700) + opts.forceMask = &m + logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. Add it to storage.conf to silence this warning", m) + } + + if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil { + return nil, err + } + } else { + if opts.forceMask != nil { + return nil, errors.New("'force_mask' is supported only with 'mount_program'") + } + // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s, a mount_program is required", backingFs) + } + if unshare.IsRootless() && isNetworkFileSystem(fsMagic) { + return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "A network file system with user namespaces is not supported. 
Please use a mount_program") + } + } + + var usingMetacopy bool + var supportsDType bool + var supportsVolatile *bool + if opts.mountProgram != "" { + supportsDType = true + t := true + supportsVolatile = &t + } else { + supportsDType, err = checkAndRecordOverlaySupport(fsMagic, home, runhome) + if err != nil { + return nil, err + } + feature := fmt.Sprintf("metacopy(%s)", opts.mountOptions) + metacopyCacheResult, _, err := cachedFeatureCheck(runhome, feature) + if err == nil { + if metacopyCacheResult { + logrus.Debugf("Cached value indicated that metacopy is being used") + } else { + logrus.Debugf("Cached value indicated that metacopy is not being used") + } + usingMetacopy = metacopyCacheResult + } else { + usingMetacopy, err = doesMetacopy(home, opts.mountOptions) + if err == nil { + if usingMetacopy { + logrus.Debugf("overlay: test mount indicated that metacopy is being used") + } else { + logrus.Debugf("overlay: test mount indicated that metacopy is not being used") + } + if err = cachedFeatureRecord(runhome, feature, usingMetacopy, ""); err != nil { + return nil, errors.Wrap(err, "recording metacopy-being-used status") + } + } else { + logrus.Infof("overlay: test mount did not indicate whether or not metacopy is being used: %v", err) + return nil, err + } + } + } + + if !opts.skipMountHome { + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + } + + fileSystemType := graphdriver.FsMagicOverlay + if opts.mountProgram != "" { + fileSystemType = graphdriver.FsMagicFUSE + } + + d := &Driver{ + name: "overlay", + home: home, + runhome: runhome, + uidMaps: options.UIDMaps, + gidMaps: options.GIDMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)), + supportsDType: supportsDType, + usingMetacopy: usingMetacopy, + supportsVolatile: supportsVolatile, + options: *opts, + } + + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)) + if backingFs == "xfs" { + // Try to enable project quota support over xfs. + if d.quotaCtl, err = quota.NewControl(home); err == nil { + projectQuotaSupported = true + } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 { + return nil, fmt.Errorf("Storage options overlay.size and overlay.inodes not supported. Filesystem does not support Project Quota: %v", err) + } + } else if opts.quota.Size > 0 || opts.quota.Inodes > 0 { + // if xfs is not the backing fs then error out if the storage-opt overlay.size is used. + return nil, fmt.Errorf("Storage option overlay.size and overlay.inodes only supported for backingFS XFS. 
Found %v", backingFs) + } + + logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy) + + return d, nil +} + +func parseOptions(options []string) (*overlayOptions, error) { + o := &overlayOptions{} + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + trimkey := strings.ToLower(key) + trimkey = strings.TrimPrefix(trimkey, "overlay.") + trimkey = strings.TrimPrefix(trimkey, "overlay2.") + trimkey = strings.TrimPrefix(trimkey, ".") + switch trimkey { + case "override_kernel_check": + logrus.Debugf("overlay: override_kernel_check option was specified, but is no longer necessary") + case "mountopt": + o.mountOptions = val + case "size": + logrus.Debugf("overlay: size=%s", val) + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + o.quota.Size = uint64(size) + case "inodes": + logrus.Debugf("overlay: inodes=%s", val) + inodes, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + o.quota.Inodes = uint64(inodes) + case "imagestore", "additionalimagestore": + logrus.Debugf("overlay: imagestore=%s", val) + // Additional read only image stores to use for lower paths + if val == "" { + continue + } + for _, store := range strings.Split(val, ",") { + store = filepath.Clean(store) + if !filepath.IsAbs(store) { + return nil, fmt.Errorf("overlay: image path %q is not absolute. Can not be relative", store) + } + st, err := os.Stat(store) + if err != nil { + return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err) + } + if !st.IsDir() { + return nil, fmt.Errorf("overlay: image path %q must be a directory", store) + } + o.imageStores = append(o.imageStores, store) + } + case "additionallayerstore": + logrus.Debugf("overlay: additionallayerstore=%s", val) + // Additional read only layer stores to use for lower paths + if val == "" { + continue + } + for _, lstore := range strings.Split(val, ",") { + elems := strings.Split(lstore, ":") + lstore = filepath.Clean(elems[0]) + if !filepath.IsAbs(lstore) { + return nil, fmt.Errorf("overlay: additionallayerstore path %q is not absolute. 
Can not be relative", lstore) + } + st, err := os.Stat(lstore) + if err != nil { + return nil, errors.Wrap(err, "overlay: can't stat additionallayerstore dir") + } + if !st.IsDir() { + return nil, fmt.Errorf("overlay: additionallayerstore path %q must be a directory", lstore) + } + var withReference bool + for _, e := range elems[1:] { + switch e { + case "ref": + if withReference { + return nil, fmt.Errorf("overlay: additionallayerstore config of %q contains %q option twice", lstore, e) + } + withReference = true + default: + return nil, fmt.Errorf("overlay: additionallayerstore config %q contains unknown option %q", lstore, e) + } + } + o.layerStores = append(o.layerStores, additionalLayerStore{ + path: lstore, + withReference: withReference, + }) + } + case "mount_program": + logrus.Debugf("overlay: mount_program=%s", val) + if val != "" { + _, err := os.Stat(val) + if err != nil { + return nil, errors.Wrapf(err, "overlay: can't stat program %q", val) + } + } + o.mountProgram = val + case "skip_mount_home": + logrus.Debugf("overlay: skip_mount_home=%s", val) + o.skipMountHome, err = strconv.ParseBool(val) + case "ignore_chown_errors": + logrus.Debugf("overlay: ignore_chown_errors=%s", val) + o.ignoreChownErrors, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "force_mask": + logrus.Debugf("overlay: force_mask=%s", val) + var mask int64 + switch val { + case "shared": + mask = 0755 + case "private": + mask = 0700 + default: + mask, err = strconv.ParseInt(val, 8, 32) + if err != nil { + return nil, err + } + } + m := os.FileMode(mask) + o.forceMask = &m + default: + return nil, fmt.Errorf("overlay: Unknown option %s", key) + } + } + return o, nil +} + +func cachedFeatureSet(feature string, set bool) string { + if set { + return fmt.Sprintf("%s-true", feature) + } + return fmt.Sprintf("%s-false", feature) +} + +func cachedFeatureCheck(runhome, feature string) (supported bool, text string, err error) { + content, err := ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true))) + if err == nil { + return true, string(content), nil + } + content, err = ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false))) + if err == nil { + return false, string(content), nil + } + return false, "", err +} + +func cachedFeatureRecord(runhome, feature string, supported bool, text string) (err error) { + f, err := os.Create(filepath.Join(runhome, cachedFeatureSet(feature, supported))) + if f != nil { + if text != "" { + fmt.Fprintf(f, "%s", text) + } + f.Close() + } + return err +} + +func SupportsNativeOverlay(home, runhome string) (bool, error) { + if os.Geteuid() != 0 || home == "" || runhome == "" { + return false, nil + } + + var contents string + flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home)) + if err == nil { + contents = strings.TrimSpace(string(flagContent)) + } + switch contents { + case "true": + logrus.Debugf("overlay: storage already configured with a mount-program") + return false, nil + default: + needsMountProgram, err := scanForMountProgramIndicators(home) + if err != nil && !os.IsNotExist(err) { + return false, err + } + if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) { + return false, err + } + if needsMountProgram { + return false, nil + } + // fall through to check if we find ourselves needing to use a + // mount program now + case "false": + } + + for _, dir := range []string{home, runhome} { + if _, err := 
os.Stat(dir); err != nil { + _ = idtools.MkdirAllAs(dir, 0700, 0, 0) + } + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return false, err + } + + supportsDType, _ := checkAndRecordOverlaySupport(fsMagic, home, runhome) + return supportsDType, nil +} + +func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) { + // We can try to modprobe overlay first + + exec.Command("modprobe", "overlay").Run() + + logLevel := logrus.ErrorLevel + if unshare.IsRootless() { + logLevel = logrus.DebugLevel + } + + layerDir, err := ioutil.TempDir(home, "compat") + if err != nil { + patherr, ok := err.(*os.PathError) + if ok && patherr.Err == syscall.ENOSPC { + return false, err + } + } + if err == nil { + // Check if reading the directory's contents populates the d_type field, which is required + // for proper operation of the overlay filesystem. + supportsDType, err = fsutils.SupportsDType(layerDir) + if err != nil { + return false, err + } + if !supportsDType { + return false, overlayutils.ErrDTypeNotSupported("overlay", backingFs) + } + + // Try a test mount in the specific location we're looking at using. + mergedDir := filepath.Join(layerDir, "merged") + lower1Dir := filepath.Join(layerDir, "lower1") + lower2Dir := filepath.Join(layerDir, "lower2") + upperDir := filepath.Join(layerDir, "upper") + workDir := filepath.Join(layerDir, "work") + defer func() { + // Permitted to fail, since the various subdirectories + // can be empty or not even there, and the home might + // legitimately be not empty + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + _ = os.RemoveAll(layerDir) + _ = os.Remove(home) + }() + _ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(upperDir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(workDir, 0700, rootUID, rootGID) + flags := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", lower1Dir, lower2Dir, upperDir, workDir) + if selinux.GetEnabled() && + selinux.SecurityCheckContext(selinuxLabelTest) == nil { + // Linux 5.11 introduced unprivileged overlay mounts but it has an issue + // when used together with selinux labels. + // Check that overlay supports selinux labels as well. 
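+			// Grounded in the call below: label.FormatMountLabel appends the SELinux context option to the mount flags, so the test mount also exercises labeled overlay mounts on this kernel.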
+ flags = label.FormatMountLabel(flags, selinuxLabelTest) + } + if unshare.IsRootless() { + flags = fmt.Sprintf("%s,userxattr", flags) + } + if err := syscall.Mknod(filepath.Join(upperDir, "whiteout"), syscall.S_IFCHR|0600, int(unix.Mkdev(0, 0))); err != nil { + logrus.Debugf("Unable to create kernel-style whiteout: %v", err) + return supportsDType, errors.Wrapf(err, "unable to create kernel-style whiteout") + } + + if len(flags) < unix.Getpagesize() { + err := unix.Mount("overlay", mergedDir, "overlay", 0, flags) + if err == nil { + logrus.Debugf("overlay: test mount with multiple lowers succeeded") + return supportsDType, nil + } + logrus.Debugf("overlay: test mount with multiple lowers failed %v", err) + } + flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir) + if selinux.GetEnabled() { + flags = label.FormatMountLabel(flags, selinuxLabelTest) + } + if len(flags) < unix.Getpagesize() { + err := unix.Mount("overlay", mergedDir, "overlay", 0, flags) + if err == nil { + logrus.StandardLogger().Logf(logLevel, "overlay: test mount with multiple lowers failed, but succeeded with a single lower") + return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") + } + logrus.Debugf("overlay: test mount with a single lower failed %v", err) + } + logrus.StandardLogger().Logf(logLevel, "'overlay' is not supported over %s at %q", backingFs, home) + return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home) + } + + logrus.StandardLogger().Logf(logLevel, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") +} + +func (d *Driver) useNaiveDiff() bool { + useNaiveDiffLock.Do(func() { + if d.options.mountProgram != "" { + useNaiveDiffOnly = true + return + } + feature := fmt.Sprintf("native-diff(%s)", d.options.mountOptions) + nativeDiffCacheResult, nativeDiffCacheText, err := cachedFeatureCheck(d.runhome, feature) + if err == nil { + if nativeDiffCacheResult { + logrus.Debugf("Cached value indicated that native-diff is usable") + } else { + logrus.Debugf("Cached value indicated that native-diff is not being used") + logrus.Info(nativeDiffCacheText) + } + useNaiveDiffOnly = !nativeDiffCacheResult + return + } + if err := doesSupportNativeDiff(d.home, d.options.mountOptions); err != nil { + nativeDiffCacheText = fmt.Sprintf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err) + logrus.Info(nativeDiffCacheText) + useNaiveDiffOnly = true + } + cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText) + }) + return useNaiveDiffOnly +} + +func (d *Driver) String() string { + return d.name +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. 
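+// Per the rows built below, it also reports whether the backing filesystem supports d_type, whether native overlay diffs are in use, and whether metacopy is active.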
+func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + {"Supports d_type", strconv.FormatBool(d.supportsDType)}, + {"Native Overlay Diff", strconv.FormatBool(!d.useNaiveDiff())}, + {"Using metacopy", strconv.FormatBool(d.usingMetacopy)}, + } +} + +// Metadata returns meta data about the overlay driver such as +// LowerDir, UpperDir, WorkDir and MergeDir used to store data. +func (d *Driver) Metadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := map[string]string{ + "WorkDir": path.Join(dir, "work"), + "MergedDir": path.Join(dir, "merged"), + "UpperDir": path.Join(dir, "diff"), + } + + lowerDirs, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + if len(lowerDirs) > 0 { + metadata["LowerDir"] = strings.Join(lowerDirs, ":") + } + + return metadata, nil +} + +// Cleanup any state created by overlay which should be cleaned when daemon +// is being shutdown. For now, we just have to unmount the bind mounted +// we had created. +func (d *Driver) Cleanup() error { + _ = os.RemoveAll(d.getStagingDir()) + return mount.Unmount(d.home) +} + +// LookupAdditionalLayer looks up additional layer store by the specified +// digest and ref and returns an object representing that layer. +// This API is experimental and can be changed without bumping the major version number. +// TODO: to remove the comment once it's no longer experimental. +func (d *Driver) LookupAdditionalLayer(dgst digest.Digest, ref string) (graphdriver.AdditionalLayer, error) { + l, err := d.getAdditionalLayerPath(dgst, ref) + if err != nil { + return nil, err + } + // Tell the additional layer store that we use this layer. + // This will increase reference counter on the store's side. + // This will be decreased on Release() method. + notifyUseAdditionalLayer(l) + return &additionalLayer{ + path: l, + d: d, + }, nil +} + +// LookupAdditionalLayerByID looks up additional layer store by the specified +// ID and returns an object representing that layer. +// This API is experimental and can be changed without bumping the major version number. +// TODO: to remove the comment once it's no longer experimental. +func (d *Driver) LookupAdditionalLayerByID(id string) (graphdriver.AdditionalLayer, error) { + l, err := d.getAdditionalLayerPathByID(id) + if err != nil { + return nil, err + } + // Tell the additional layer store that we use this layer. + // This will increase reference counter on the store's side. + // This will be decreased on Release() method. + notifyUseAdditionalLayer(l) + return &additionalLayer{ + path: l, + d: d, + }, nil +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. +func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + if readWrite { + return d.CreateReadWrite(id, template, opts) + } + return d.Create(id, template, opts) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. 
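+// Any "size" or "inodes" storage options not supplied by the caller default to the driver-wide quota settings before the layer is created.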
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { + return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + } + + if opts == nil { + opts = &graphdriver.CreateOpts{ + StorageOpt: map[string]string{}, + } + } + + if _, ok := opts.StorageOpt["size"]; !ok { + if opts.StorageOpt == nil { + opts.StorageOpt = map[string]string{} + } + opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) + } + + if _, ok := opts.StorageOpt["inodes"]; !ok { + if opts.StorageOpt == nil { + opts.StorageOpt = map[string]string{} + } + opts.StorageOpt["inodes"] = strconv.FormatUint(d.options.quota.Inodes, 10) + } + + return d.create(id, parent, opts, false) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + if opts != nil && len(opts.StorageOpt) != 0 { + if _, ok := opts.StorageOpt["size"]; ok { + return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") + } + + if _, ok := opts.StorageOpt["inodes"]; ok { + return fmt.Errorf("--storage-opt inodes is only supported for ReadWrite Layers") + } + } + + return d.create(id, parent, opts, true) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) { + dir := d.dir(id) + + uidMaps := d.uidMaps + gidMaps := d.gidMaps + + if opts != nil && opts.IDMappings != nil { + uidMaps = opts.IDMappings.UIDs() + gidMaps = opts.IDMappings.GIDs() + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return err + } + + idPair := idtools.IDPair{ + UID: rootUID, + GID: rootGID, + } + + // Make the link directory if it does not exist + if err := idtools.MkdirAllAndChownNew(path.Join(d.home, linkDir), 0700, idPair); err != nil { + return err + } + + if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0700, idPair); err != nil { + return err + } + if parent != "" { + st, err := system.Stat(d.dir(parent)) + if err != nil { + return err + } + rootUID = int(st.UID()) + rootGID = int(st.GID()) + } + + if _, err := system.Lstat(dir); err == nil { + logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir) + // Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir, + // so that we can’t end up with two symlinks in linkDir pointing to the same layer. 
+ if err := d.Remove(id); err != nil { + return errors.Wrapf(err, "removing a pre-existing layer directory %q", dir) + } + } + + if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + if err2 := os.RemoveAll(dir); err2 != nil { + logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2) + } + } + }() + + if d.quotaCtl != nil && !disableQuota { + quota := quota.Quota{} + if opts != nil && len(opts.StorageOpt) > 0 { + driver := &Driver{} + if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { + return err + } + if driver.options.quota.Size > 0 { + quota.Size = driver.options.quota.Size + } + if driver.options.quota.Inodes > 0 { + quota.Inodes = driver.options.quota.Inodes + } + } + // Set container disk quota limit + // If it is set to 0, we will track the disk usage, but not enforce a limit + if err := d.quotaCtl.SetQuota(dir, quota); err != nil { + return err + } + } + + perms := defaultPerms + if d.options.forceMask != nil { + perms = *d.options.forceMask + } + if parent != "" { + st, err := system.Stat(filepath.Join(d.dir(parent), "diff")) + if err != nil { + return err + } + perms = os.FileMode(st.Mode()) + } + + if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil { + return err + } + + lid := generateID(idLength) + if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { + return err + } + + // Write link id to link file + if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { + return err + } + + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + // if no parent directory, create a dummy lower directory and skip writing a "lowers" file + if parent == "" { + return idtools.MkdirAs(path.Join(dir, "empty"), 0700, rootUID, rootGID) + } + + lower, err := d.getLower(parent) + if err != nil { + return err + } + if lower != "" { + if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { + return err + } + } + + return nil +} + +// Parse overlay storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to set the disk project quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.quota.Size = uint64(size) + case "inodes": + inodes, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return err + } + driver.options.quota.Inodes = uint64(inodes) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +func (d *Driver) getLower(parent string) (string, error) { + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return "", err + } + + // Read Parent link fileA + parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Warnf("Can't read parent link %q because it does not exist. 
Going through storage to recreate the missing links.", path.Join(parentDir, "link")) + if err := d.recreateSymlinks(); err != nil { + return "", errors.Wrap(err, "recreating the links") + } + parentLink, err = ioutil.ReadFile(path.Join(parentDir, "link")) + if err != nil { + return "", err + } + } + lowers := []string{path.Join(linkDir, string(parentLink))} + + parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) + if err == nil { + parentLowers := strings.Split(string(parentLower), ":") + lowers = append(lowers, parentLowers...) + } + return strings.Join(lowers, ":"), nil +} + +func (d *Driver) dir(id string) string { + p, _ := d.dir2(id) + return p +} + +func (d *Driver) dir2(id string) (string, bool) { + newpath := path.Join(d.home, id) + if _, err := os.Stat(newpath); err != nil { + for _, p := range d.AdditionalImageStores() { + l := path.Join(p, d.name, id) + _, err = os.Stat(l) + if err == nil { + return l, true + } + } + } + return newpath, false +} + +func (d *Driver) getLowerDirs(id string) ([]string, error) { + var lowersArray []string + lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) + if err == nil { + for _, s := range strings.Split(string(lowers), ":") { + lower := d.dir(s) + lp, err := os.Readlink(lower) + // if the link does not exist, we lost the symlinks during a sudden reboot. + // Let's go ahead and recreate those symlinks. + if err != nil { + if os.IsNotExist(err) { + logrus.Warnf("Can't read link %q because it does not exist. A storage corruption might have occurred, attempting to recreate the missing symlinks. It might be best wipe the storage to avoid further errors due to storage corruption.", lower) + if err := d.recreateSymlinks(); err != nil { + return nil, fmt.Errorf("recreating the missing symlinks: %v", err) + } + // let's call Readlink on lower again now that we have recreated the missing symlinks + lp, err = os.Readlink(lower) + if err != nil { + return nil, err + } + } else { + return nil, err + } + } + lowersArray = append(lowersArray, path.Clean(d.dir(path.Join("link", lp)))) + } + } else if !os.IsNotExist(err) { + return nil, err + } + return lowersArray, nil +} + +func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMap) string { + if uidMaps == nil { + uidMaps = d.uidMaps + } + if gidMaps == nil { + gidMaps = d.gidMaps + } + if uidMaps != nil { + var uids, gids bytes.Buffer + if len(uidMaps) == 1 && uidMaps[0].Size == 1 { + uids.WriteString(fmt.Sprintf("squash_to_uid=%d", uidMaps[0].HostID)) + } else { + uids.WriteString("uidmapping=") + for _, i := range uidMaps { + if uids.Len() > 0 { + uids.WriteString(":") + } + uids.WriteString(fmt.Sprintf("%d:%d:%d", i.ContainerID, i.HostID, i.Size)) + } + } + if len(gidMaps) == 1 && gidMaps[0].Size == 1 { + gids.WriteString(fmt.Sprintf("squash_to_gid=%d", gidMaps[0].HostID)) + } else { + gids.WriteString("gidmapping=") + for _, i := range gidMaps { + if gids.Len() > 0 { + gids.WriteString(":") + } + gids.WriteString(fmt.Sprintf("%d:%d:%d", i.ContainerID, i.HostID, i.Size)) + } + } + return fmt.Sprintf("%s,%s,%s", opts, uids.String(), gids.String()) + } + return opts +} + +// Remove cleans the directories that are created for this id. 
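+// It deletes the layer's symlink under linkDir and releases any additional-layer reference before removing the layer directory itself.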
+func (d *Driver) Remove(id string) error { + dir := d.dir(id) + lid, err := ioutil.ReadFile(path.Join(dir, "link")) + if err == nil { + if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { + logrus.Debugf("Failed to remove link: %v", err) + } + } + + d.releaseAdditionalLayerByID(id) + + if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// recreateSymlinks goes through the driver's home directory and checks if the diff directory +// under each layer has a symlink created for it under the linkDir. If the symlink does not +// exist, it creates them +func (d *Driver) recreateSymlinks() error { + // We have at most 3 corrective actions per layer, so 10 iterations is plenty. + const maxIterations = 10 + + // List all the directories under the home directory + dirs, err := ioutil.ReadDir(d.home) + if err != nil { + return fmt.Errorf("reading driver home directory %q: %v", d.home, err) + } + linksDir := filepath.Join(d.home, "l") + // This makes the link directory if it doesn't exist + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil { + return err + } + // Keep looping as long as we take some corrective action in each iteration + var errs *multierror.Error + madeProgress := true + iterations := 0 + for madeProgress { + errs = nil + madeProgress = false + // Check that for each layer, there's a link in "l" with the name in + // the layer's "link" file that points to the layer's "diff" directory. + for _, dir := range dirs { + // Skip over the linkDir and anything that is not a directory + if dir.Name() == linkDir || !dir.Mode().IsDir() { + continue + } + // Read the "link" file under each layer to get the name of the symlink + data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link")) + if err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "reading name of symlink for %q", dir.Name())) + continue + } + linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) + // Check if the symlink exists, and if it doesn't, create it again with the + // name we got from the "link" file + _, err = os.Lstat(linkPath) + if err != nil && os.IsNotExist(err) { + if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil { + errs = multierror.Append(errs, err) + continue + } + madeProgress = true + } else if err != nil { + errs = multierror.Append(errs, err) + continue + } + } + // Now check if we somehow lost a "link" file, by making sure + // that each symlink we have corresponds to one. + links, err := ioutil.ReadDir(linksDir) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + // Go through all of the symlinks in the "l" directory + for _, link := range links { + // Read the symlink's target, which should be "../$layer/diff" + target, err := os.Readlink(filepath.Join(linksDir, link.Name())) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + targetComponents := strings.Split(target, string(os.PathSeparator)) + if len(targetComponents) != 3 || targetComponents[0] != ".." 
|| targetComponents[2] != "diff" { + errs = multierror.Append(errs, errors.Errorf("link target of %q looks weird: %q", link, target)) + // force the link to be recreated on the next pass + if err := os.Remove(filepath.Join(linksDir, link.Name())); err != nil { + if !os.IsNotExist(err) { + errs = multierror.Append(errs, errors.Wrapf(err, "removing link %q", link)) + } // else don’t report any error, but also don’t set madeProgress. + continue + } + madeProgress = true + continue + } + // Reconstruct the name of the target's link file and check that + // it has the basename of our symlink in it. + targetID := targetComponents[1] + linkFile := filepath.Join(d.dir(targetID), "link") + data, err := ioutil.ReadFile(linkFile) + if err != nil || string(data) != link.Name() { + // NOTE: If two or more links point to the same target, we will update linkFile + // with every value of link.Name(), and set madeProgress = true every time. + if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "correcting link for layer %s", targetID)) + continue + } + madeProgress = true + } + } + iterations++ + if iterations >= maxIterations { + errs = multierror.Append(errs, fmt.Errorf("Reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations)) + break + } + } + if errs != nil { + return errs.ErrorOrNil() + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { + return d.get(id, false, options) +} + +func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { + dir, inAdditionalStore := d.dir2(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + readWrite := !inAdditionalStore + + if !d.SupportsShifting() || options.DisableShifting { + disableShifting = true + } + + logLevel := logrus.WarnLevel + if unshare.IsRootless() { + logLevel = logrus.DebugLevel + } + optsList := options.Options + if len(optsList) == 0 { + optsList = strings.Split(d.options.mountOptions, ",") + } else { + // If metacopy=on is present in d.options.mountOptions it must be present in the mount + // options otherwise the kernel refuses to follow the metacopy xattr. + if hasMetacopyOption(strings.Split(d.options.mountOptions, ",")) && !hasMetacopyOption(options.Options) { + if d.usingMetacopy { + logrus.StandardLogger().Logf(logrus.DebugLevel, "Adding metacopy option, configured globally") + optsList = append(optsList, "metacopy=on") + } + } + } + if !d.usingMetacopy { + if hasMetacopyOption(optsList) { + logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel") + } + optsList = stripOption(optsList, "metacopy=on") + } + + for _, o := range optsList { + if o == "ro" { + readWrite = false + break + } + } + + lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil && !os.IsNotExist(err) { + return "", err + } + splitLowers := strings.Split(string(lowers), ":") + if len(splitLowers) > maxDepth { + return "", errors.New("max depth exceeded") + } + + // absLowers is the list of lowers as absolute paths, which works well with additional stores. + absLowers := []string{} + // relLowers is the list of lowers as paths relative to the driver's home directory. + relLowers := []string{} + + // Check if $link/../diff{1-*} exist. 
If they do, add them, in order, as the front of the lowers + // lists that we're building. "diff" itself is the upper, so it won't be in the lists. + link, err := ioutil.ReadFile(path.Join(dir, "link")) + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Warnf("Can't read parent link %q because it does not exist. Going through storage to recreate the missing links.", path.Join(dir, "link")) + if err := d.recreateSymlinks(); err != nil { + return "", errors.Wrap(err, "recreating the links") + } + link, err = ioutil.ReadFile(path.Join(dir, "link")) + if err != nil { + return "", err + } + } + diffN := 1 + perms := defaultPerms + if d.options.forceMask != nil { + perms = *d.options.forceMask + } + permsKnown := false + st, err := os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) + if err == nil { + perms = os.FileMode(st.Mode()) + permsKnown = true + } + for err == nil { + absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN))) + relLowers = append(relLowers, dumbJoin(linkDir, string(link), "..", nameWithSuffix("diff", diffN))) + diffN++ + st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) + if err == nil && !permsKnown { + perms = os.FileMode(st.Mode()) + permsKnown = true + } + } + + // For each lower, resolve its path, and append it and any additional diffN + // directories to the lowers list. + for _, l := range splitLowers { + if l == "" { + continue + } + lower := "" + newpath := path.Join(d.home, l) + if st, err := os.Stat(newpath); err != nil { + for _, p := range d.AdditionalImageStores() { + lower = path.Join(p, d.name, l) + if st2, err2 := os.Stat(lower); err2 == nil { + if !permsKnown { + perms = os.FileMode(st2.Mode()) + permsKnown = true + } + break + } + lower = "" + } + // if it is a "not found" error, that means the symlinks were lost in a sudden reboot + // so call the recreateSymlinks function to go through all the layer dirs and recreate + // the symlinks with the name from their respective "link" files + if lower == "" && os.IsNotExist(err) { + logrus.Warnf("Can't stat lower layer %q because it does not exist. Going through storage to recreate the missing symlinks.", newpath) + if err := d.recreateSymlinks(); err != nil { + return "", fmt.Errorf("Recreating the missing symlinks: %v", err) + } + lower = newpath + } else if lower == "" { + return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err) + } + } else { + if !permsKnown { + perms = os.FileMode(st.Mode()) + permsKnown = true + } + lower = newpath + } + absLowers = append(absLowers, lower) + relLowers = append(relLowers, l) + diffN = 1 + _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + for err == nil { + absLowers = append(absLowers, dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + relLowers = append(relLowers, dumbJoin(l, "..", nameWithSuffix("diff", diffN))) + diffN++ + _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + } + } + + if len(absLowers) == 0 { + absLowers = append(absLowers, path.Join(dir, "empty")) + relLowers = append(relLowers, path.Join(id, "empty")) + } + // user namespace requires this to move a directory from lower to upper. 
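+	// Resolve the host-side owner of container root from the configured ID mappings, so the diff and merged directories created below get the correct ownership.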
+ rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + diffDir := path.Join(dir, "diff") + if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil { + return "", err + } + + mergedDir := path.Join(dir, "merged") + // Create the driver merged dir + if err := idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return "", err + } + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if retErr != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { + logrus.Errorf("Unmounting %v: %v", mergedDir, mntErr) + } + } + } + }() + + workdir := path.Join(dir, "work") + + if d.options.mountProgram == "" && unshare.IsRootless() { + optsList = append(optsList, "userxattr") + } + + if options.Volatile && !hasVolatileOption(optsList) { + supported, err := d.getSupportsVolatile() + if err != nil { + return "", err + } + // If "volatile" is not supported by the file system, just ignore the request + if supported { + optsList = append(optsList, "volatile") + } + } + + if d.supportsIDmappedMounts() && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 { + var newAbsDir []string + mappedRoot := filepath.Join(d.home, id, "mapped") + if err := os.MkdirAll(mappedRoot, 0700); err != nil { + return "", err + } + + pid, cleanupFunc, err := createUsernsProcess(options.UidMaps, options.GidMaps) + if err != nil { + return "", err + } + defer cleanupFunc() + + idMappedMounts := make(map[string]string) + + // rewrite the lower dirs to their idmapped mount. + c := 0 + for _, absLower := range absLowers { + mappedMountSrc := getMappedMountRoot(absLower) + + root, found := idMappedMounts[mappedMountSrc] + if !found { + root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c)) + c++ + if err := createIDMappedMount(mappedMountSrc, root, int(pid)); err != nil { + return "", errors.Wrapf(err, "create mapped mount for %q on %q", mappedMountSrc, root) + } + idMappedMounts[mappedMountSrc] = root + + // overlay takes a reference on the mount, so it is safe to unmount + // the mapped idmounts as soon as the final overlay file system is mounted. + defer unix.Unmount(root, unix.MNT_DETACH) + } + + // relative path to the layer through the id mapped mount + rel, err := filepath.Rel(mappedMountSrc, absLower) + if err != nil { + return "", err + } + + newAbsDir = append(newAbsDir, filepath.Join(root, rel)) + } + absLowers = newAbsDir + } + + var opts string + if readWrite { + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) + } else { + opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) + } + if len(optsList) > 0 { + opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) + } + + mountData := label.FormatMountLabel(opts, options.MountLabel) + mountFunc := unix.Mount + mountTarget := mergedDir + + pageSize := unix.Getpagesize() + + if d.options.mountProgram != "" { + mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { + if !disableShifting { + label = d.optsAppendMappings(label, options.UidMaps, options.GidMaps) + } + + // if forceMask is in place, tell fuse-overlayfs to write the permissions mask to an unprivileged xattr as well. 
+ if d.options.forceMask != nil { + label = label + ",xattr_permissions=2" + } + + mountProgram := exec.Command(d.options.mountProgram, "-o", label, target) + mountProgram.Dir = d.home + var b bytes.Buffer + mountProgram.Stderr = &b + err := mountProgram.Run() + if err != nil { + output := b.String() + if output == "" { + output = "<stderr empty>" + } + return errors.Wrapf(err, "using mount program %s: %s", d.options.mountProgram, output) + } + return nil + } + } else if len(mountData) >= pageSize { + // Use relative paths and mountFrom when the mount data has exceeded + // the page size. The mount syscall fails if the mount data cannot + // fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chroot. + + workdir = path.Join(id, "work") + //FIXME: We need to figure out how to get this to work with additional stores + if readWrite { + diffDir := path.Join(id, "diff") + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir) + } else { + opts = fmt.Sprintf("lowerdir=%s", strings.Join(relLowers, ":")) + } + if len(optsList) > 0 { + opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) + } + mountData = label.FormatMountLabel(opts, options.MountLabel) + if len(mountData) >= pageSize { + return "", fmt.Errorf("cannot mount layer, mount label %q too large %d >= page size %d", options.MountLabel, len(mountData), pageSize) + } + mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { + return mountFrom(d.home, source, target, mType, flags, label) + } + mountTarget = path.Join(id, "merged") + } + + // overlay has a check in place to prevent mounting the same file system twice + // if volatile was already specified. + err = os.RemoveAll(filepath.Join(workdir, "work/incompat/volatile")) + if err != nil && !os.IsNotExist(err) { + return "", err + } + + flags, data := mount.ParseOptions(mountData) + logrus.Debugf("overlay: mount_data=%s", mountData) + if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil { + return "", fmt.Errorf("creating overlay mount to %s, mount_data=%q: %v", mountTarget, mountData, err) + } + + return mergedDir, nil +} + +// Put unmounts the mount path created for the given id. +func (d *Driver) Put(id string) error { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return err + } + mountpoint := path.Join(dir, "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { + return err + } + + unmounted := false + + mappedRoot := filepath.Join(d.home, id, "mapped") + // It should not happen, but clean up any mapped mount if it was leaked. + if _, err := os.Stat(mappedRoot); err == nil { + mounts, err := ioutil.ReadDir(mappedRoot) + if err == nil { + // Go through all of the mapped mounts. + for _, m := range mounts { + _ = unix.Unmount(filepath.Join(mappedRoot, m.Name()), unix.MNT_DETACH) + } + } + } + + if d.options.mountProgram != "" { + // Attempt to unmount the FUSE mount using either fusermount or fusermount3.
+ // If they fail, fallback to unix.Unmount + for _, v := range []string{"fusermount3", "fusermount"} { + err := exec.Command(v, "-u", mountpoint).Run() + if err != nil && errors.Cause(err) != exec.ErrNotFound { + logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err) + } + if err == nil { + unmounted = true + break + } + } + // If fusermount|fusermount3 failed to unmount the FUSE file system, make sure all + // pending changes are propagated to the file system + if !unmounted { + fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0) + if err == nil { + if err := unix.Syncfs(fd); err != nil { + logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err) + } + unix.Close(fd) + } + } + } + + if !unmounted { + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) { + logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + } + } + + if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { + logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err) + } + + return nil +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (d *Driver) isParent(id, parent string) bool { + lowers, err := d.getLowerDirs(id) + if err != nil { + return false + } + if parent == "" && len(lowers) > 0 { + return false + } + + parentDir := d.dir(parent) + var ld string + if len(lowers) > 0 { + ld = filepath.Dir(lowers[0]) + } + if ld == "" && parent == "" { + return true + } + return ld == parentDir +} + +func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat { + whiteoutFormat := archive.OverlayWhiteoutFormat + if d.options.mountProgram != "" { + // If we are using a mount program, we are most likely running + // as an unprivileged user that cannot use mknod, so fallback to the + // AUFS whiteout format. + whiteoutFormat = archive.AUFSWhiteoutFormat + } + return whiteoutFormat +} + +type overlayFileGetter struct { + diffDirs []string +} + +func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) { + for _, d := range g.diffDirs { + f, err := os.Open(filepath.Join(d, path)) + if err == nil { + return f, nil + } + } + if len(g.diffDirs) > 0 { + return os.Open(filepath.Join(g.diffDirs[0], path)) + } + return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist) +} + +func (g *overlayFileGetter) Close() error { + return nil +} + +func (d *Driver) getStagingDir() string { + return filepath.Join(d.home, "staging") +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences, either for this layer, or one of our +// lowers if we're just a template directory. Used for direct access for tar-split. +func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p, err := d.getDiffPath(id) + if err != nil { + return nil, err + } + paths, err := d.getLowerDiffPaths(id) + if err != nil { + return nil, err + } + return &overlayFileGetter{diffDirs: append([]string{p}, paths...)}, nil +} + +// CleanupStagingDirectory cleanups the staging directory. 
+func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error { + return os.RemoveAll(stagingDirectory) +} + +// ApplyDiff applies the changes in the new layer using the specified function +func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) { + var idMappings *idtools.IDMappings + if options != nil { + idMappings = options.Mappings + } + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + + applyDir := "" + + if id == "" { + err := os.MkdirAll(d.getStagingDir(), 0700) + if err != nil && !os.IsExist(err) { + return graphdriver.DriverWithDifferOutput{}, err + } + applyDir, err = ioutil.TempDir(d.getStagingDir(), "") + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + + } else { + var err error + applyDir, err = d.getDiffPath(id) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + } + + logrus.Debugf("Applying differ in %s", applyDir) + + out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{ + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + IgnoreChownErrors: d.options.ignoreChownErrors, + WhiteoutFormat: d.getWhiteoutFormat(), + InUserNS: userns.RunningInUserNS(), + }) + out.Target = applyDir + return out, err +} + +// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory. +func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffOpts) error { + if filepath.Dir(stagingDirectory) != d.getStagingDir() { + return fmt.Errorf("%q is not a staging directory", stagingDirectory) + } + + diff, err := d.getDiffPath(id) + if err != nil { + return err + } + if err := os.RemoveAll(diff); err != nil && !os.IsNotExist(err) { + return err + } + return os.Rename(stagingDirectory, diff) +} + +// DifferTarget gets the location where files are stored for the layer. 
+func (d *Driver) DifferTarget(id string) (string, error) { + return d.getDiffPath(id) +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) { + + if !d.isParent(id, parent) { + if d.options.ignoreChownErrors { + options.IgnoreChownErrors = d.options.ignoreChownErrors + } + if d.options.forceMask != nil { + options.ForceMask = d.options.forceMask + } + return d.naiveDiff.ApplyDiff(id, parent, options) + } + + idMappings := options.Mappings + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + + applyDir, err := d.getDiffPath(id) + if err != nil { + return 0, err + } + + logrus.Debugf("Applying tar in %s", applyDir) + // Overlay doesn't need the parent id to apply the diff + if err := untar(options.Diff, applyDir, &archive.TarOptions{ + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + IgnoreChownErrors: d.options.ignoreChownErrors, + ForceMask: d.options.forceMask, + WhiteoutFormat: d.getWhiteoutFormat(), + InUserNS: userns.RunningInUserNS(), + }); err != nil { + return 0, err + } + + return directory.Size(applyDir) +} + +func (d *Driver) getDiffPath(id string) (string, error) { + dir := d.dir(id) + return redirectDiffIfAdditionalLayer(path.Join(dir, "diff")) +} + +func (d *Driver) getLowerDiffPaths(id string) ([]string, error) { + layers, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + for i, l := range layers { + layers[i], err = redirectDiffIfAdditionalLayer(l) + if err != nil { + return nil, err + } + } + return layers, nil +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + if d.options.mountProgram == "" && (d.useNaiveDiff() || !d.isParent(id, parent)) { + return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) + } + + p, err := d.getDiffPath(id) + if err != nil { + return 0, err + } + return directory.Size(p) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) { + if d.useNaiveDiff() || !d.isParent(id, parent) { + return d.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel) + } + + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + + lowerDirs, err := d.getLowerDiffPaths(id) + if err != nil { + return nil, err + } + + diffPath, err := d.getDiffPath(id) + if err != nil { + return nil, err + } + logrus.Debugf("Tar with options on %s", diffPath) + return archive.TarWithOptions(diffPath, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: idMappings.UIDs(), + GIDMaps: idMappings.GIDs(), + WhiteoutFormat: d.getWhiteoutFormat(), + WhiteoutData: lowerDirs, + }) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. 
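+// When native diffing is unavailable, or parent is not the layer's direct parent, the computation is delegated to the naive diff driver.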
+func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { + if d.useNaiveDiff() || !d.isParent(id, parent) { + return d.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel) + } + // Overlay doesn't have snapshots, so we need to get changes from all parent + // layers. + diffPath, err := d.getDiffPath(id) + if err != nil { + return nil, err + } + layers, err := d.getLowerDiffPaths(id) + if err != nil { + return nil, err + } + + return archive.OverlayChanges(layers, diffPath) +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return d.options.imageStores +} + +// UpdateLayerIDMap updates ID mappings in a from matching the ones specified +// by toContainer to those specified by toHost. +func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { + var err error + dir := d.dir(id) + diffDir := filepath.Join(dir, "diff") + + rootUID, rootGID := 0, 0 + if toHost != nil { + rootUID, rootGID, err = idtools.GetRootUIDGID(toHost.UIDs(), toHost.GIDs()) + if err != nil { + return err + } + } + + // Mount the new layer and handle ownership changes and possible copy_ups in it. + options := graphdriver.MountOpts{ + MountLabel: mountLabel, + Options: strings.Split(d.options.mountOptions, ","), + } + layerFs, err := d.get(id, true, options) + if err != nil { + return err + } + err = graphdriver.ChownPathByMaps(layerFs, toContainer, toHost) + if err != nil { + if err2 := d.Put(id); err2 != nil { + logrus.Errorf("%v; unmounting %v: %v", err, id, err2) + } + return err + } + if err = d.Put(id); err != nil { + return err + } + + // Rotate the diff directories. + i := 0 + perms := defaultPerms + st, err := os.Stat(nameWithSuffix(diffDir, i)) + if d.options.forceMask != nil { + perms = *d.options.forceMask + } else { + if err == nil { + perms = os.FileMode(st.Mode()) + } + } + for err == nil { + i++ + _, err = os.Stat(nameWithSuffix(diffDir, i)) + } + + for i > 0 { + err = os.Rename(nameWithSuffix(diffDir, i-1), nameWithSuffix(diffDir, i)) + if err != nil { + return err + } + i-- + } + + // We need to re-create the work directory as it might keep a reference + // to the old upper layer in the index. + workDir := filepath.Join(dir, "work") + if err := os.RemoveAll(workDir); err == nil { + if err := idtools.MkdirAs(workDir, defaultPerms, rootUID, rootGID); err != nil { + return err + } + } + + // Re-create the directory that we're going to use as the upper layer. + if err := idtools.MkdirAs(diffDir, perms, rootUID, rootGID); err != nil { + return err + } + return nil +} + +// supportsIDmappedMounts returns whether the kernel supports using idmapped mounts with +// overlay lower layers. 
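+// The result is probed once via checkAndRecordIDMappedSupport and cached in d.supportsIDMappedMounts for later calls.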
+func (d *Driver) supportsIDmappedMounts() bool { + if d.supportsIDMappedMounts != nil { + return *d.supportsIDMappedMounts + } + + supportsIDMappedMounts, err := checkAndRecordIDMappedSupport(d.home, d.runhome) + d.supportsIDMappedMounts = &supportsIDMappedMounts + if err == nil { + return supportsIDMappedMounts + } + logrus.Debugf("Check for idmapped mounts support %v", err) + return false +} + +// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS +func (d *Driver) SupportsShifting() bool { + if os.Getenv("_TEST_FORCE_SUPPORT_SHIFTING") == "yes-please" { + return true + } + if d.options.mountProgram != "" { + return true + } + return d.supportsIDmappedMounts() +} + +// dumbJoin is more or less a dumber version of filepath.Join, but one which +// won't Clean() the path, allowing us to append ".." as a component and trust +// pathname resolution to do some non-obvious work. +func dumbJoin(names ...string) string { + if len(names) == 0 { + return string(os.PathSeparator) + } + return strings.Join(names, string(os.PathSeparator)) +} + +func nameWithSuffix(name string, number int) string { + if number == 0 { + return name + } + return fmt.Sprintf("%s%d", name, number) +} + +func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string, error) { + refElem := base64.StdEncoding.EncodeToString([]byte(ref)) + for _, ls := range d.options.layerStores { + ref := "" + if ls.withReference { + ref = refElem + } + target := path.Join(ls.path, ref, dgst.String()) + // Check if all necessary files exist + for _, p := range []string{ + filepath.Join(target, "diff"), + filepath.Join(target, "info"), + filepath.Join(target, "blob"), + } { + if _, err := os.Stat(p); err != nil { + return "", errors.Wrapf(graphdriver.ErrLayerUnknown, + "failed to stat additional layer %q: %v", p, err) + } + } + return target, nil + } + + return "", errors.Wrapf(graphdriver.ErrLayerUnknown, + "additional layer (%q, %q) not found", dgst, ref) +} + +func (d *Driver) releaseAdditionalLayerByID(id string) { + if al, err := d.getAdditionalLayerPathByID(id); err == nil { + notifyReleaseAdditionalLayer(al) + } else if !os.IsNotExist(err) { + logrus.Warnf("Unexpected error on reading Additional Layer Store pointer %v", err) + } +} + +// additionalLayer represents a layer in Additional Layer Store. +type additionalLayer struct { + path string + d *Driver + releaseOnce sync.Once +} + +// Info returns arbitrary information stored along with this layer (i.e. `info` file). +// This API is experimental and can be changed without bumping the major version number. +// TODO: to remove the comment once it's no longer experimental. +func (al *additionalLayer) Info() (io.ReadCloser, error) { + return os.Open(filepath.Join(al.path, "info")) +} + +// Blob returns a reader of the raw contents of this layer. +func (al *additionalLayer) Blob() (io.ReadCloser, error) { + return os.Open(filepath.Join(al.path, "blob")) +} + +// CreateAs creates a new layer from this additional layer. +// This API is experimental and can be changed without bumping the major version number. +// TODO: to remove the comment once it's no longer experimental. +func (al *additionalLayer) CreateAs(id, parent string) error { + // TODO: support opts + if err := al.d.Create(id, parent, nil); err != nil { + return err + } + dir := al.d.dir(id) + diffDir := path.Join(dir, "diff") + if err := os.RemoveAll(diffDir); err != nil { + return err + } + // tell the additional layer store that we use this layer. 
+ // mark this layer as "additional layer" + if err := ioutil.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil { + return err + } + notifyUseAdditionalLayer(al.path) + return os.Symlink(filepath.Join(al.path, "diff"), diffDir) +} + +func (d *Driver) getAdditionalLayerPathByID(id string) (string, error) { + al, err := ioutil.ReadFile(path.Join(d.dir(id), "additionallayer")) + if err != nil { + return "", err + } + return string(al), nil +} + +// Release tells the additional layer store that we don't use this handler. +// This API is experimental and can be changed without bumping the major version number. +// TODO: to remove the comment once it's no longer experimental. +func (al *additionalLayer) Release() { + // Tell the additional layer store that we don't use this layer handler. + // This will decrease the reference counter on the store's side, which was + // increased in LookupAdditionalLayer (so this must be called only once). + al.releaseOnce.Do(func() { + notifyReleaseAdditionalLayer(al.path) + }) +} + +// notifyUseAdditionalLayer notifies Additional Layer Store that we use the specified layer. +// This is done by creating "use" file in the layer directory. This is useful for +// Additional Layer Store to consider when to perform GC. Notification-aware Additional +// Layer Store must return ENOENT. +func notifyUseAdditionalLayer(al string) { + if !path.IsAbs(al) { + logrus.Warnf("additionallayer must be absolute (got: %v)", al) + return + } + useFile := path.Join(al, "use") + f, err := os.Create(useFile) + if os.IsNotExist(err) { + return + } else if err == nil { + f.Close() + if err := os.Remove(useFile); err != nil { + logrus.Warnf("Failed to remove use file") + } + } + logrus.Warnf("Unexpected error by Additional Layer Store %v during use; GC doesn't seem to be supported", err) +} + +// notifyReleaseAdditionalLayer notifies Additional Layer Store that we don't use the specified +// layer anymore. This is done by rmdir-ing the layer directory. This is useful for +// Additional Layer Store to consider when to perform GC. Notification-aware Additional +// Layer Store must return ENOENT. +func notifyReleaseAdditionalLayer(al string) { + if !path.IsAbs(al) { + logrus.Warnf("additionallayer must be absolute (got: %v)", al) + return + } + // tell the additional layer store that we don't use this layer anymore. + err := unix.Rmdir(al) + if os.IsNotExist(err) { + return + } + logrus.Warnf("Unexpected error by Additional Layer Store %v during release; GC doesn't seem to be supported", err) +} + +// redirectDiffIfAdditionalLayer checks if the passed diff path is Additional Layer and +// returns the redirected path. If the passed diff is not the one in Additional Layer +// Store, it returns the original path without changes. +func redirectDiffIfAdditionalLayer(diffPath string) (string, error) { + if ld, err := os.Readlink(diffPath); err == nil { + // diff is the link to Additional Layer Store + if !path.IsAbs(ld) { + return "", fmt.Errorf("linkpath must be absolute (got: %q)", ld) + } + diffPath = ld + } else if err.(*os.PathError).Err != syscall.EINVAL { + return "", err + } + return diffPath, nil +} + +// getMappedMountRoot is a heuristic that calculates the parent directory where +// the idmapped mount should be applied. +// It is useful to minimize the number of idmapped mounts and at the same time use +// a common path as long as possible to reduce the length of the mount data argument. 
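+// For example, a lower at home/<id>/diff maps to home/<id>, while a lower referenced through a linkDir symlink maps to the driver home itself, so such layers can share a single idmapped mount.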
+func getMappedMountRoot(path string) string { + dirName := filepath.Dir(path) + if filepath.Base(dirName) == linkDir { + return filepath.Dir(dirName) + } + return dirName +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go new file mode 100644 index 00000000000..0b70a5d92bc --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_cgo.go @@ -0,0 +1,21 @@ +// +build linux,cgo + +package overlay + +import ( + "path" + + "github.com/containers/storage/pkg/directory" +) + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For Overlay, it attempts to check the XFS quota for size, and falls back to +// finding the size of the "diff" directory. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + usage := &directory.DiskUsage{} + if d.quotaCtl != nil { + err := d.quotaCtl.GetDiskUsage(d.dir(id), usage) + return usage, err + } + return directory.Usage(path.Join(d.dir(id), "diff")) +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go new file mode 100644 index 00000000000..1cdac777751 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go @@ -0,0 +1,16 @@ +// +build linux,!cgo + +package overlay + +import ( + "path" + + "github.com/containers/storage/pkg/directory" +) + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For Overlay, it attempts to check the XFS quota for size, and falls back to +// finding the size of the "diff" directory. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + return directory.Usage(path.Join(d.dir(id), "diff")) +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go new file mode 100644 index 00000000000..49af84a229f --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package overlay + +func SupportsNativeOverlay(graphroot, rundir string) (bool, error) { + return false, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go new file mode 100644 index 00000000000..736c48b9c1a --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/randomid.go @@ -0,0 +1,81 @@ +// +build linux + +package overlay + +import ( + "crypto/rand" + "encoding/base32" + "fmt" + "io" + "os" + "syscall" + "time" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// generateID creates a new random string identifier with the given length +func generateID(l int) string { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + size = (l*5 + 7) / 8 + u = make([]byte, size) + ) + // TODO: Include time component, counter component, random component + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. 
This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + logrus.Errorf("Generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + s := base32.StdEncoding.EncodeToString(u) + + return s[:l] +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == unix.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go new file mode 100644 index 00000000000..9fc57b36bf9 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go @@ -0,0 +1,20 @@ +// +build linux + +package overlayutils + +import ( + "fmt" + + graphdriver "github.com/containers/storage/drivers" + "github.com/pkg/errors" +) + +// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. +func ErrDTypeNotSupported(driver, backingFs string) error { + msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs) + if backingFs == "xfs" { + msg += " Reformat the filesystem with ftype=1 to enable d_type support." + } + msg += " Running without d_type is not supported." + return errors.Wrap(graphdriver.ErrNotSupported, msg) +} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go new file mode 100644 index 00000000000..0609f970c28 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go @@ -0,0 +1,404 @@ +// +build linux,!exclude_disk_quota,cgo + +// +// projectquota.go - implements XFS project quota controls +// for setting quota limits on a newly created directory. +// It currently supports the legacy XFS specific ioctls. 
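+//
+// In outline: each managed directory tree is tagged with an XFS project id
+// (FS_IOC_FSSETXATTR with FS_XFLAG_PROJINHERIT set), and block/inode limits
+// are then attached to that id via quotactl(2) with Q_XSETPQLIM against the
+// backing block device.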
+//
+// TODO: use the generic quota control ioctls FS_IOC_FS{GET,SET}XATTR
+// for both xfs/ext4 on kernel versions >= v4.5
+//
+
+package quota
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/dqblk_xfs.h>
+
+#ifndef FS_XFLAG_PROJINHERIT
+struct fsxattr {
+	__u32         fsx_xflags;
+	__u32         fsx_extsize;
+	__u32         fsx_nextents;
+	__u32         fsx_projid;
+	unsigned char fsx_pad[12];
+};
+#define FS_XFLAG_PROJINHERIT 0x00000200
+#endif
+#ifndef FS_IOC_FSGETXATTR
+#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
+#endif
+#ifndef FS_IOC_FSSETXATTR
+#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
+#endif
+
+#ifndef PRJQUOTA
+#define PRJQUOTA 2
+#endif
+#ifndef FS_PROJ_QUOTA
+#define FS_PROJ_QUOTA 2
+#endif
+#ifndef Q_XSETPQLIM
+#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
+#endif
+#ifndef Q_XGETPQUOTA
+#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
+#endif
+*/
+import "C"
+import (
+	"fmt"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+	"path/filepath"
+	"syscall"
+	"unsafe"
+
+	"github.com/containers/storage/pkg/directory"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+const projectIDsAllocatedPerQuotaHome = 10000
+
+// Quota limit params - currently we only control the blocks hard limit and inodes
+type Quota struct {
+	Size   uint64
+	Inodes uint64
+}
+
+// Control - Context to be used by a storage driver (e.g. overlay)
+// that wants to apply project quotas to container dirs
+type Control struct {
+	backingFsBlockDev string
+	nextProjectID     uint32
+	quotas            map[string]uint32
+}
+
+// generateUniqueProjectID attempts to generate a unique project id.
+// Multiple directories per file system can have quotas, and each needs a
+// group of unique ids. This function attempts to allocate at least
+// projectIDsAllocatedPerQuotaHome (10000) unique project ids, based on the
+// inode of the basepath.
+func generateUniqueProjectID(path string) (uint32, error) {
+	fileinfo, err := os.Stat(path)
+	if err != nil {
+		return 0, err
+	}
+	stat, ok := fileinfo.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, fmt.Errorf("not a syscall.Stat_t: %s", path)
+	}
+	projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
+	return uint32(projectID), nil
+}
+
+// NewControl - initialize project quota support.
+// Test to make sure that quota can be set on a test dir and find
+// the first project id to be used for the next container create.
+//
+// Returns nil (and an error) if project quota is not supported.
+//
+// First get the project id of the basePath directory.
+// This test will fail if the backing fs is not xfs.
+//
+// The xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
+//    echo 100000:/var/lib/containers/storage/overlay >> /etc/projects
+//    echo 200000:/var/lib/containers/storage/volumes >> /etc/projects
+//    echo storage:100000 >> /etc/projid
+//    echo volumes:200000 >> /etc/projid
+//    xfs_quota -x -c 'project -s storage volumes' /<xfs mount point>
+//
+// In the example above, the storage directory project id will be used as a
+// "start offset" and all containers will be assigned larger project ids
+// (e.g. >= 100000). Then the volumes directory project id will be used as a
+// "start offset" and all volumes will be assigned larger project ids
+// (e.g. >= 200000).
+// This is a way to prevent xfs_quota management from conflicting with
+// containers/storage.
+//
+// Then try to create a test directory with the next project id and set a quota
+// on it. If that works, continue to scan existing containers to map allocated
+// project ids.
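+//
+// A minimal sketch of how a driver might use this API (hypothetical paths,
+// not taken from this file):
+//
+//	ctl, err := quota.NewControl("/var/lib/containers/storage/overlay")
+//	if err != nil {
+//		return err // the backing filesystem does not support project quotas
+//	}
+//	if err := ctl.SetQuota("/var/lib/containers/storage/overlay/myid", quota.Quota{Size: 10 << 30}); err != nil {
+//		return err
+//	}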
+// +func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + if minProjectID == 0 { + // Indicates the storage was never initialized + // Generate a unique range of Projectids for this basepath + minProjectID, err = generateUniqueProjectID(basePath) + if err != nil { + return nil, err + } + + } + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + Inodes: 0, + } + if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { + return nil, err + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: make(map[string]uint32), + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID(basePath) + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas[targetPath] = projectID + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID) + return setProjectQuota(q.backingFsBlockDev, projectID, quota) +} + +// setProjectQuota - set the quota for project id on xfs block device +func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.FS_PROJ_QUOTA + + if quota.Size > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + } + if quota.Inodes > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT + d.d_ino_hardlimit = C.__u64(quota.Inodes) + d.d_ino_softlimit = d.d_ino_hardlimit + } + + var cs = C.CString(backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", + projectID, backingFsBlockDev, errno.Error()) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + d, err := q.fsDiskQuotaFromPath(targetPath) + if err != nil { + return err + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + quota.Inodes = uint64(d.d_ino_hardlimit) + return nil +} + +// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota +func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error { + d, err := 
q.fsDiskQuotaFromPath(targetPath) + if err != nil { + return err + } + usage.Size = int64(d.d_bcount) * 512 + usage.InodeCount = int64(d.d_icount) + + return nil +} + +func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) { + var d C.fs_disk_quota_t + + projectID, ok := q.quotas[targetPath] + if !ok { + return d, fmt.Errorf("quota not found for path : %s", targetPath) + } + + // + // get the quota limit for the container's project id + // + var cs = C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return d, fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", + projectID, q.backingFsBlockDev, errno.Error()) + } + + return d, nil +} + +// getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + + return uint32(fsx.fsx_projid), nil +} + +// setProjectID - set the project id of path on xfs +func setProjectID(targetPath string, projectID uint32) error { + dir, err := openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT + _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) + } + + return nil +} + +// findNextProjectID - find the next project id to be used for containers +// by scanning driver home directory to find used project ids +func (q *Control) findNextProjectID(home string) error { + files, err := ioutil.ReadDir(home) + if err != nil { + return fmt.Errorf("read directory failed : %s", home) + } + for _, file := range files { + if !file.IsDir() { + continue + } + path := filepath.Join(home, file.Name()) + projid, err := getProjectID(path) + if err != nil { + return err + } + if projid > 0 { + q.quotas[path] = projid + } + if q.nextProjectID <= projid { + q.nextProjectID = projid + 1 + } + } + + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +// Get the backing block device of the driver home directory +// and create a block device node under the home directory +// to be used by quotactl commands +func makeBackingFsDev(home string) (string, error) { + var stat unix.Stat_t + if err := unix.Stat(home, &stat); err != nil { + return "", err + } + + backingFsBlockDev := path.Join(home, 
"backingFsBlockDev") + // Re-create just in case someone copied the home directory over to a new device + unix.Unlink(backingFsBlockDev) + if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) + } + + return backingFsBlockDev, nil +} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go new file mode 100644 index 00000000000..7469138db7a --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_unsupported.go @@ -0,0 +1,33 @@ +// +build !linux exclude_disk_quota !cgo + +package quota + +import ( + "github.com/pkg/errors" +) + +// Quota limit params - currently we only control blocks hard limit +type Quota struct { + Size uint64 + Inodes uint64 +} + +// Control - Context to be used by storage driver (e.g. overlay) +// who wants to apply project quotas to container dirs +type Control struct { +} + +func NewControl(basePath string) (*Control, error) { + return nil, errors.New("filesystem does not support, or has not enabled quotas") +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + return errors.New("filesystem does not support, or has not enabled quotas") +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + return errors.New("filesystem does not support, or has not enabled quotas") +} diff --git a/vendor/github.com/containers/storage/drivers/register/register_aufs.go b/vendor/github.com/containers/storage/drivers/register/register_aufs.go new file mode 100644 index 00000000000..7743dcedbd0 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_aufs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_aufs,linux + +package register + +import ( + // register the aufs graphdriver + _ "github.com/containers/storage/drivers/aufs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go new file mode 100644 index 00000000000..40ff1cdd0df --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_btrfs,linux + +package register + +import ( + // register the btrfs graphdriver + _ "github.com/containers/storage/drivers/btrfs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go new file mode 100644 index 00000000000..cefe2e8c754 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_devicemapper,linux,cgo + +package register + +import ( + // register the devmapper graphdriver + _ "github.com/containers/storage/drivers/devmapper" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go new file mode 100644 index 00000000000..30e3b4d7475 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_overlay.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_overlay,linux,cgo + 
+package register + +import ( + // register the overlay graphdriver + _ "github.com/containers/storage/drivers/overlay" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_vfs.go b/vendor/github.com/containers/storage/drivers/register/register_vfs.go new file mode 100644 index 00000000000..691ce859291 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/containers/storage/drivers/vfs" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_windows.go b/vendor/github.com/containers/storage/drivers/register/register_windows.go new file mode 100644 index 00000000000..048b27097d1 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_windows.go @@ -0,0 +1,6 @@ +package register + +import ( + // register the windows graph driver + _ "github.com/containers/storage/drivers/windows" +) diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go new file mode 100644 index 00000000000..4623e7f4648 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris + +package register + +import ( + // register the zfs driver + _ "github.com/containers/storage/drivers/zfs" +) diff --git a/vendor/github.com/containers/storage/drivers/template.go b/vendor/github.com/containers/storage/drivers/template.go new file mode 100644 index 00000000000..d40d71cfc1e --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/template.go @@ -0,0 +1,52 @@ +package graphdriver + +import ( + "github.com/sirupsen/logrus" + + "github.com/containers/storage/pkg/idtools" +) + +// TemplateDriver is just barely enough of a driver that we can implement a +// naive version of CreateFromTemplate on top of it. +type TemplateDriver interface { + DiffDriver + CreateReadWrite(id, parent string, opts *CreateOpts) error + Create(id, parent string, opts *CreateOpts) error + Remove(id string) error +} + +// CreateFromTemplate creates a layer with the same contents and parent as +// another layer. Internally, it may even depend on that other layer +// continuing to exist, as if it were actually a child of the child layer. 
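+// NaiveCreateFromTemplate does this naively: it creates the new layer, takes
+// a Diff of the template layer against its parent, and applies that diff to
+// the new layer, removing the new layer again if either step fails.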
+func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error { + var err error + if readWrite { + err = d.CreateReadWrite(id, parent, opts) + } else { + err = d.Create(id, parent, opts) + } + if err != nil { + return err + } + diff, err := d.Diff(template, templateIDMappings, parent, parentIDMappings, opts.MountLabel) + if err != nil { + if err2 := d.Remove(id); err2 != nil { + logrus.Errorf("Removing layer %q: %v", id, err2) + } + return err + } + + applyOptions := ApplyDiffOpts{ + Diff: diff, + Mappings: templateIDMappings, + MountLabel: opts.MountLabel, + IgnoreChownErrors: opts.ignoreChownErrors, + } + if _, err = d.ApplyDiff(id, parent, applyOptions); err != nil { + if err2 := d.Remove(id); err2 != nil { + logrus.Errorf("Removing layer %q: %v", id, err2) + } + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go new file mode 100644 index 00000000000..bf22a5f6fd5 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/copy_linux.go @@ -0,0 +1,7 @@ +package vfs + +import "github.com/containers/storage/drivers/copy" + +func dirCopy(srcDir, dstDir string) error { + return copy.DirCopy(srcDir, dstDir, copy.Content, true) +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go new file mode 100644 index 00000000000..8ac80ee1dba --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go @@ -0,0 +1,9 @@ +// +build !linux + +package vfs // import "github.com/containers/storage/drivers/vfs" + +import "github.com/containers/storage/pkg/chrootarchive" + +func dirCopy(srcDir, dstDir string) error { + return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir) +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go new file mode 100644 index 00000000000..1b58e2f63e1 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -0,0 +1,299 @@ +package vfs + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + + graphdriver "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/system" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" +) + +var ( + // CopyDir defines the copy method to use. + CopyDir = dirCopy +) + +const defaultPerms = os.FileMode(0555) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. 
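+// Driver options are parsed below; for example (illustrative values):
+//
+//	vfs.imagestore=/mnt/readonly-images
+//	vfs.ignore_chown_errors=true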
+func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { + d := &Driver{ + name: "vfs", + homes: []string{home}, + idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + } + + rootIDs := d.idMappings.RootPair() + if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { + return nil, err + } + for _, option := range options.DriverOptions { + + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "vfs.imagestore", ".imagestore": + d.homes = append(d.homes, strings.Split(val, ",")...) + continue + case "vfs.mountopt": + return nil, fmt.Errorf("vfs driver does not support mount options") + case ".ignore_chown_errors", "vfs.ignore_chown_errors": + logrus.Debugf("vfs: ignore_chown_errors=%s", val) + var err error + d.ignoreChownErrors, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("vfs driver does not support %s options", key) + } + } + d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d) + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater) + + return d, nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + name string + homes []string + idMappings *idtools.IDMappings + ignoreChownErrors bool + naiveDiff graphdriver.DiffDriver + updater graphdriver.LayerIDMapUpdater +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// Metadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) Metadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +type fileGetNilCloser struct { + storage.FileGetter +} + +func (f fileGetNilCloser) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p := d.dir(id) + return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. 
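+// For vfs this needs nothing special: the template is simply used as the
+// parent, and create() below copies its directory in full.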
+func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + if readWrite { + return d.CreateReadWrite(id, template, opts) + } + return d.Create(id, template, opts) +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) { + if d.ignoreChownErrors { + options.IgnoreChownErrors = d.ignoreChownErrors + } + return d.naiveDiff.ApplyDiff(id, parent, options) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.create(id, parent, opts, false) +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + return d.create(id, parent, opts, true) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool) (retErr error) { + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for vfs") + } + + idMappings := d.idMappings + if opts != nil && opts.IDMappings != nil { + idMappings = opts.IDMappings + } + + dir := d.dir(id) + rootIDs := idMappings.RootPair() + if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { + return err + } + + defer func() { + if retErr != nil { + os.RemoveAll(dir) + } + }() + + rootPerms := defaultPerms + if parent != "" { + st, err := system.Stat(d.dir(parent)) + if err != nil { + return err + } + rootPerms = os.FileMode(st.Mode()) + rootIDs.UID = int(st.UID()) + rootIDs.GID = int(st.GID()) + } + if err := idtools.MkdirAndChown(dir, rootPerms, rootIDs); err != nil { + return err + } + labelOpts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent != "" { + parentDir, err := d.Get(parent, graphdriver.MountOpts{}) + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := dirCopy(parentDir, dir); err != nil { + return err + } + } + + return nil + +} + +func (d *Driver) dir(id string) string { + for i, home := range d.homes { + if i > 0 { + home = filepath.Join(home, d.String()) + } + candidate := filepath.Join(home, "dir", filepath.Base(id)) + fi, err := os.Stat(candidate) + if err == nil && fi.IsDir() { + return candidate + } + } + return filepath.Join(d.homes[0], "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. +func (d *Driver) Remove(id string) error { + return system.EnsureRemoveAll(d.dir(id)) +} + +// Get returns the directory for the given id. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { + dir := d.dir(id) + switch len(options.Options) { + case 0: + case 1: + if options.Options[0] == "ro" { + // ignore "ro" option + break + } + fallthrough + default: + return "", fmt.Errorf("vfs driver does not support mount options") + } + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +// Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. 
+func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For VFS, it queries the directory for this ID. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + return directory.Usage(d.dir(id)) +} + +// Exists checks to see if the directory exists for the given id. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + if len(d.homes) > 1 { + return d.homes[1:] + } + return nil +} + +// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS +func (d *Driver) SupportsShifting() bool { + return d.updater.SupportsShifting() +} + +// UpdateLayerIDMap updates ID mappings in a from matching the ones specified +// by toContainer to those specified by toHost. +func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { + return d.updater.UpdateLayerIDMap(id, toContainer, toHost, mountLabel) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { + return d.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) { + return d.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
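+// Like Changes and Diff above, this is delegated to the NaiveDiffDriver
+// wrapper set up in Init.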
+func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + return d.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) +} diff --git a/vendor/github.com/containers/storage/drivers/windows/jsoniter_windows.go b/vendor/github.com/containers/storage/drivers/windows/jsoniter_windows.go new file mode 100644 index 00000000000..fa141263019 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/windows/jsoniter_windows.go @@ -0,0 +1,5 @@ +package windows + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go new file mode 100644 index 00000000000..1491517413c --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -0,0 +1,1006 @@ +//+build windows + +package windows + +import ( + "archive/tar" + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/backuptar" + "github.com/Microsoft/hcsshim" + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/longpath" + "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/system" + units "github.com/docker/go-units" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +// filterDriver is an HCSShim driver type for the Windows Filter driver. +const filterDriver = 1 + +var ( + // mutatedFiles is a list of files that are mutated by the import process + // and must be backed up and restored. + mutatedFiles = map[string]string{ + "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", + } + noreexec = false +) + +// init registers the windows graph drivers to the register. +func init() { + graphdriver.Register("windowsfilter", InitFilter) + // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes + // debugging issues in the re-exec codepath significantly easier. + if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { + logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") + noreexec = true + } else { + reexec.Register("docker-windows-write-layer", writeLayerReexec) + } +} + +type checker struct { +} + +func (c *checker) IsMounted(path string) bool { + return false +} + +// Driver represents a windows graph driver. +type Driver struct { + // info stores the shim driver information + info hcsshim.DriverInfo + ctr *graphdriver.RefCounter + // it is safe for windows to use a cache here because it does not support + // restoring containers when the daemon dies. + cacheMu sync.Mutex + cache map[string]string +} + +// InitFilter returns a new Windows storage filter driver. 
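+// It refuses to run on ReFS volumes, creates the home directory if needed,
+// and manages layers through HCS using the "filter" flavour.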
+func InitFilter(home string, options graphdriver.Options) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + + for _, option := range options.DriverOptions { + if strings.HasPrefix(option, "windows.mountopt=") { + return nil, fmt.Errorf("windows driver does not support mount options") + } else { + return nil, fmt.Errorf("option %s not supported", option) + } + } + + fsType, err := getFileSystemType(string(home[0])) + if err != nil { + return nil, err + } + if strings.ToLower(fsType) == "refs" { + return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) + } + + if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { + return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) + } + + d := &Driver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: filterDriver, + }, + cache: make(map[string]string), + ctr: graphdriver.NewRefCounter(&checker{}), + } + return d, nil +} + +// win32FromHresult is a helper function to get the win32 error code from an HRESULT +func win32FromHresult(hr uintptr) uintptr { + if hr&0x1fff0000 == 0x00070000 { + return hr & 0xffff + } + return hr +} + +// getFileSystemType obtains the type of a file system through GetVolumeInformation +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx +func getFileSystemType(drive string) (fsType string, hr error) { + var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") + buf = make([]uint16, 255) + size = windows.MAX_PATH + 1 + ) + if len(drive) != 1 { + hr = errors.New("getFileSystemType must be called with a drive letter") + return + } + drive += `:\` + n := uintptr(unsafe.Pointer(nil)) + r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + fsType = windows.UTF16ToString(buf) + return +} + +// String returns the string representation of a driver. This should match +// the name the graph driver has been registered with. +func (d *Driver) String() string { + return "windowsfilter" +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Windows", ""}, + } +} + +// panicIfUsedByLcow does exactly what it says. +// TODO @jhowardmsft - this is a temporary measure for the bring-up of +// Linux containers on Windows. It is a failsafe to ensure that the right +// graphdriver is used. +func panicIfUsedByLcow() { + if system.LCOWSupported() { + panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode") + } +} + +// Exists returns true if the given id is registered with this driver. +func (d *Driver) Exists(id string) bool { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return false + } + result, err := hcsshim.LayerExists(d.info, rID) + if err != nil { + return false + } + return result +} + +// CreateFromTemplate creates a layer with the same contents and parent as another layer. 
+func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { + return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) + } + return d.create(id, parent, "", false, nil) +} + +// Create creates a new read-only layer with the given id. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + panicIfUsedByLcow() + if opts != nil { + return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) + } + return d.create(id, parent, "", true, nil) +} + +func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { + rPId, err := d.resolveID(parent) + if err != nil { + return err + } + + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return err + } + + var layerChain []string + + if rPId != "" { + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return err + } + if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { + // This is a legitimate parent layer (not the empty "-init" layer), + // so include it in the layer chain. + layerChain = []string{parentPath} + } + } + + layerChain = append(layerChain, parentChain...) + + if readOnly { + if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { + return err + } + } else { + var parentPath string + if len(layerChain) != 0 { + parentPath = layerChain[0] + } + + if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { + return err + } + + storageOptions, err := parseStorageOpt(storageOpt) + if err != nil { + return fmt.Errorf("Failed to parse storage options - %s", err) + } + + if storageOptions.size != 0 { + if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { + return err + } + } + } + + if _, err := os.Lstat(d.dir(parent)); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return err + } + + return nil +} + +// dir returns the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.info.HomeDir, filepath.Base(id)) +} + +// Remove unmounts and removes the dir information. +func (d *Driver) Remove(id string) error { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return err + } + + // This retry loop is due to a bug in Windows (Internal bug #9432268) + // if GetContainers fails with ErrVmcomputeOperationInvalidState + // it is a transient error. Retry until it succeeds. + var computeSystems []hcsshim.ContainerProperties + retryCount := 0 + osv := system.GetOSVersion() + for { + // Get and terminate any template VMs that are currently using the layer. 
+ // Note: It is unfortunate that we end up in the graphdrivers Remove() call + // for both containers and images, but the logic for template VMs is only + // needed for images - specifically we are looking to see if a base layer + // is in use by a template VM as a result of having started a Hyper-V + // container at some point. + // + // We have a retry loop for ErrVmcomputeOperationInvalidState and + // ErrVmcomputeOperationAccessIsDenied as there is a race condition + // in RS1 and RS2 building during enumeration when a silo is going away + // for example under it, in HCS. AccessIsDenied added to fix 30278. + // + // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider + // using platform APIs (if available) to get this more succinctly. Also + // consider enhancing the Remove() interface to have context of why + // the remove is being called - that could improve efficiency by not + // enumerating compute systems during a remove of a container as it's + // not required. + computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) + if err != nil { + if (osv.Build < 15139) && + ((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) { + if retryCount >= 500 { + break + } + retryCount++ + time.Sleep(10 * time.Millisecond) + continue + } + return err + } + break + } + + for _, computeSystem := range computeSystems { + if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { + container, err := hcsshim.OpenContainer(computeSystem.ID) + if err != nil { + return err + } + defer container.Close() + err = container.Terminate() + if hcsshim.IsPending(err) { + err = container.Wait() + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + return err + } + } + } + + layerPath := filepath.Join(d.info.HomeDir, rID) + tmpID := fmt.Sprintf("%s-removing", rID) + tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { + return err + } + if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { + logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) + } + + return nil +} + +// Get returns the rootfs path for the id. This will mount the dir at its given path. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { + panicIfUsedByLcow() + logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, options.MountLabel) + var dir string + + switch len(options.Options) { + case 0: + case 1: + if options.Options[0] == "ro" { + // ignore "ro" option + break + } + fallthrough + default: + return "", fmt.Errorf("windows driver does not support mount options") + } + rID, err := d.resolveID(id) + if err != nil { + return "", err + } + if count := d.ctr.Increment(rID); count > 1 { + return d.cache[rID], nil + } + + // Getting the layer paths must be done outside of the lock. 
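+	// The sequence below is: read the layer chain, ActivateLayer, then
+	// PrepareLayer with that chain; on any failure the reference count is
+	// decremented again and the completed steps are rolled back.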
+ layerChain, err := d.getLayerChain(rID) + if err != nil { + d.ctr.Decrement(rID) + return "", err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + d.ctr.Decrement(rID) + return "", err + } + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + d.ctr.Decrement(rID) + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + + mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) + if err != nil { + d.ctr.Decrement(rID) + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + logrus.Warnf("Failed to Unprepare %s: %s", id, err) + } + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + d.cacheMu.Lock() + d.cache[rID] = mountPath + d.cacheMu.Unlock() + + // If the layer has a mount path, use that. Otherwise, use the + // folder path. + if mountPath != "" { + dir = mountPath + } else { + dir = d.dir(id) + } + + return dir, nil +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For VFS, it queries the directory for this ID. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + return directory.Usage(d.dir(id)) +} + +// Put adds a new layer to the driver. +func (d *Driver) Put(id string) error { + panicIfUsedByLcow() + logrus.Debugf("WindowsGraphDriver Put() id %s", id) + + rID, err := d.resolveID(id) + if err != nil { + return err + } + if count := d.ctr.Decrement(rID); count > 0 { + return nil + } + d.cacheMu.Lock() + _, exists := d.cache[rID] + delete(d.cache, rID) + d.cacheMu.Unlock() + + // If the cache was not populated, then the layer was left unprepared and deactivated + if !exists { + return nil + } + + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return err + } + return hcsshim.DeactivateLayer(d.info, rID) +} + +// Cleanup ensures the information the driver stores is properly removed. +// We use this opportunity to cleanup any -removing folders which may be +// still left if the daemon was killed while it was removing a layer. +func (d *Driver) Cleanup() error { + items, err := ioutil.ReadDir(d.info.HomeDir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + // Note we don't return an error below - it's possible the files + // are locked. However, next time around after the daemon exits, + // we likely will be able to to cleanup successfully. Instead we log + // warnings if there are errors. + for _, item := range items { + if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { + if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil { + logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err) + } else { + logrus.Infof("Cleaned up %s", item.Name()) + } + } + } + + return nil +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". 
+// The layer should be mounted when calling this function +func (d *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (_ io.ReadCloser, err error) { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return + } + + layerChain, err := d.getLayerChain(rID) + if err != nil { + return + } + + // this is assuming that the layer is unmounted + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return nil, err + } + prepare := func() { + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + logrus.Warnf("Failed to Deactivate %s: %s", rID, err) + } + } + + arch, err := d.exportLayer(rID, layerChain) + if err != nil { + prepare() + return + } + return ioutils.NewReadCloserWrapper(arch, func() error { + err := arch.Close() + prepare() + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +// The layer should not be mounted when calling this function. +func (d *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { + panicIfUsedByLcow() + rID, err := d.resolveID(id) + if err != nil { + return nil, err + } + parentChain, err := d.getLayerChain(rID) + if err != nil { + return nil, err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + return nil, err + } + defer func() { + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) + } + }() + + var changes []archive.Change + err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentChain) + if err != nil { + return err + } + defer r.Close() + + for { + name, _, fileInfo, err := r.Next() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + name = filepath.ToSlash(name) + if fileInfo == nil { + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete}) + } else { + // Currently there is no way to tell between an add and a modify. + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify}) + } + } + }) + if err != nil { + return nil, err + } + + return changes, nil +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +// The layer should not be mounted when calling this function +func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (int64, error) { + panicIfUsedByLcow() + var layerChain []string + if parent != "" { + rPId, err := d.resolveID(parent) + if err != nil { + return 0, err + } + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return 0, err + } + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return 0, err + } + layerChain = append(layerChain, parentPath) + layerChain = append(layerChain, parentChain...) + } + + size, err := d.importLayer(id, options.Diff, layerChain) + if err != nil { + return 0, err + } + + if err = d.setLayerChain(id, layerChain); err != nil { + return 0, err + } + + return size, nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
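+// On Windows the size is computed from Changes plus archive.ChangesSize over
+// the mounted layer.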
+func (d *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { + panicIfUsedByLcow() + rPId, err := d.resolveID(parent) + if err != nil { + return + } + + changes, err := d.Changes(id, idMappings, rPId, parentMappings, mountLabel) + if err != nil { + return + } + + layerFs, err := d.Get(id, graphdriver.MountOpts{}) + if err != nil { + return + } + defer d.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} + +// Metadata returns custom driver information. +func (d *Driver) Metadata(id string) (map[string]string, error) { + panicIfUsedByLcow() + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { + t := tar.NewWriter(w) + for { + name, size, fileInfo, err := r.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if fileInfo == nil { + // Write a whiteout file. + hdr := &tar.Header{ + Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))), + } + err := t.WriteHeader(hdr) + if err != nil { + return err + } + } else { + err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) + if err != nil { + return err + } + } + } + return t.Close() +} + +// exportLayer generates an archive from a layer based on the given ID. +func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) { + archive, w := io.Pipe() + go func() { + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths) + if err != nil { + return err + } + + err = writeTarFromLayer(r, w) + cerr := r.Close() + if err == nil { + err = cerr + } + return err + }) + w.CloseWithError(err) + }() + + return archive, nil +} + +// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and +// writes it to a backup stream, and also saves any files that will be mutated +// by the import layer process to a backup location. 
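+// The affected files are the UtilityVM BCD entries listed in the
+// mutatedFiles map above; the saved copies are what DiffGetter later hands
+// back for those paths.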
+func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { + var bcdBackup *os.File + var bcdBackupWriter *winio.BackupFileWriter + if backupPath, ok := mutatedFiles[hdr.Name]; ok { + bcdBackup, err = os.Create(filepath.Join(root, backupPath)) + if err != nil { + return nil, err + } + defer func() { + cerr := bcdBackup.Close() + if err == nil { + err = cerr + } + }() + + bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) + defer func() { + cerr := bcdBackupWriter.Close() + if err == nil { + err = cerr + } + }() + + buf.Reset(io.MultiWriter(w, bcdBackupWriter)) + } else { + buf.Reset(w) + } + + defer func() { + ferr := buf.Flush() + if err == nil { + err = ferr + } + }() + + return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) +} + +func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { + t := tar.NewReader(r) + hdr, err := t.Next() + totalSize := int64(0) + buf := bufio.NewWriter(nil) + for err == nil { + base := path.Base(hdr.Name) + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) + err = w.Remove(filepath.FromSlash(name)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else if hdr.Typeflag == tar.TypeLink { + err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else { + var ( + name string + size int64 + fileInfo *winio.FileBasicInfo + ) + name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) + if err != nil { + return 0, err + } + err = w.Add(filepath.FromSlash(name), fileInfo) + if err != nil { + return 0, err + } + hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) + totalSize += size + } + } + if err != io.EOF { + return 0, err + } + return totalSize, nil +} + +// importLayer adds a new layer to the tag and graph store based on the given data. +func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { + if !noreexec { + cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) + output := bytes.NewBuffer(nil) + cmd.Stdin = layerData + cmd.Stdout = output + cmd.Stderr = output + + if err = cmd.Start(); err != nil { + return + } + + if err = cmd.Wait(); err != nil { + return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) + } + + return strconv.ParseInt(output.String(), 10, 64) + } + return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...) +} + +// writeLayerReexec is the re-exec entry point for writing a layer from a tar file +func writeLayerReexec() { + size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...) + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + fmt.Fprint(os.Stdout, size) +} + +// writeLayer writes a layer from a tar file. +func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) { + err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) + if err != nil { + return 0, err + } + if noreexec { + defer func() { + if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil { + // This should never happen, but just in case when in debugging mode. 
+				// See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale.
+				panic("Failed to disable process privileges while in non re-exec mode")
+			}
+		}()
+	}
+
+	info := hcsshim.DriverInfo{
+		Flavour: filterDriver,
+		HomeDir: home,
+	}
+
+	w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
+	if err != nil {
+		return 0, err
+	}
+
+	size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id))
+	if err != nil {
+		return 0, err
+	}
+
+	err = w.Close()
+	if err != nil {
+		return 0, err
+	}
+
+	return size, nil
+}
+
+// resolveID computes the layerID information based on the given id.
+func (d *Driver) resolveID(id string) (string, error) {
+	content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID"))
+	if os.IsNotExist(err) {
+		return id, nil
+	} else if err != nil {
+		return "", err
+	}
+	return string(content), nil
+}
+
+// setID stores the layerId on disk.
+func (d *Driver) setID(id, altID string) error {
+	return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
+}
+
+// getLayerChain returns the layer chain information.
+func (d *Driver) getLayerChain(id string) ([]string, error) {
+	jPath := filepath.Join(d.dir(id), "layerchain.json")
+	content, err := ioutil.ReadFile(jPath)
+	if os.IsNotExist(err) {
+		return nil, nil
+	} else if err != nil {
+		return nil, fmt.Errorf("Unable to read layerchain file - %s", err)
+	}
+
+	var layerChain []string
+	err = json.Unmarshal(content, &layerChain)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to unmarshal layerchain json - %s", err)
+	}
+
+	return layerChain, nil
+}
+
+// setLayerChain stores the layer chain information on disk.
+func (d *Driver) setLayerChain(id string, chain []string) error {
+	content, err := json.Marshal(&chain)
+	if err != nil {
+		return fmt.Errorf("Failed to marshal layerchain json - %s", err)
+	}
+
+	jPath := filepath.Join(d.dir(id), "layerchain.json")
+	err = ioutil.WriteFile(jPath, content, 0600)
+	if err != nil {
+		return fmt.Errorf("Unable to write layerchain file - %s", err)
+	}
+
+	return nil
+}
+
+type fileGetCloserWithBackupPrivileges struct {
+	path string
+}
+
+func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) {
+	if backupPath, ok := mutatedFiles[filename]; ok {
+		return os.Open(filepath.Join(fg.path, backupPath))
+	}
+
+	var f *os.File
+	// Open the file while holding the Windows backup privilege. This ensures that the
+	// file can be opened even if the caller does not actually have access to it according
+	// to the security descriptor. Also use sequential file access to avoid depleting the
+	// standby list - Microsoft VSO Bug Tracker #9900466
+	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
+		path := longpath.AddPrefix(filepath.Join(fg.path, filename))
+		p, err := windows.UTF16FromString(path)
+		if err != nil {
+			return err
+		}
+		const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
+		h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0)
+		if err != nil {
+			return &os.PathError{Op: "open", Path: path, Err: err}
+		}
+		f = os.NewFile(uintptr(h), path)
+		return nil
+	})
+	return f, err
+}
+
+func (fg *fileGetCloserWithBackupPrivileges) Close() error {
+	return nil
+}
+
+// DiffGetter returns a FileGetCloser that can read files from the directory that
+// contains files for the layer differences. Used for direct access for tar-split.
+func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+	panicIfUsedByLcow()
+	id, err := d.resolveID(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil
+}
+
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+	return nil
+}
+
+// UpdateLayerIDMap changes ownerships in the layer's filesystem tree from
+// matching those in toContainer to matching those in toHost.
+func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
+	return fmt.Errorf("windows doesn't support changing ID mappings")
+}
+
+// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs in a userNS
+func (d *Driver) SupportsShifting() bool {
+	return false
+}
+
+type storageOptions struct {
+	size uint64
+}
+
+func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
+	options := storageOptions{}
+
+	// Read size to change the block device size per container.
+	for key, val := range storageOpt {
+		key := strings.ToLower(key)
+		switch key {
+		case "size":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			options.size = uint64(size)
+		default:
+			return nil, fmt.Errorf("Unknown storage option: %s", key)
+		}
+	}
+	return &options, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS b/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS
new file mode 100644
index 00000000000..9c270c541f5
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS
@@ -0,0 +1,2 @@
+Jörg Thalheim (@Mic92)
+Arthur Gautier (@baloose)
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
new file mode 100644
index 00000000000..f29dc8f8556
--- /dev/null
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -0,0 +1,512 @@
+// +build linux freebsd
+
+package zfs
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	graphdriver "github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/directory"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/mistifyio/go-zfs"
+	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+type zfsOptions struct {
+	fsName       string
+	mountPath    string
+	mountOptions string
+}
+
+const defaultPerms = os.FileMode(0555)
+
+func init() {
+	graphdriver.Register("zfs", Init)
+}
+
+// Logger returns a zfs logger implementation.
+type Logger struct{}
+
+// Log wraps log message from ZFS driver with a prefix '[zfs]'.
+func (*Logger) Log(cmd []string) {
+	logrus.WithField("storage-driver", "zfs").Debugf("%s", strings.Join(cmd, " "))
+}
+
+// Init returns a new ZFS driver.
+// It takes base mount path and an array of options which are represented as key value pairs.
+// Each option is in the form key=value. 'zfs.fsname' is expected to be a valid key in the options.
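+//
+// A minimal usage sketch (the base path and option values below are
+// illustrative assumptions, not defaults):
+//
+//	opts := graphdriver.Options{
+//		DriverOptions: []string{
+//			"zfs.fsname=zroot/containers",
+//			"zfs.mountopt=nodev",
+//		},
+//	}
+//	driver, err := Init("/var/lib/containers/storage/zfs", opts)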
+func Init(base string, opt graphdriver.Options) (graphdriver.Driver, error) { + var err error + + logger := logrus.WithField("storage-driver", "zfs") + + if _, err := exec.LookPath("zfs"); err != nil { + logger.Debugf("zfs command is not available: %v", err) + return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available") + } + + file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600) + if err != nil { + logger.Debugf("cannot open /dev/zfs: %v", err) + return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err) + } + defer file.Close() + + options, err := parseOptions(opt.DriverOptions) + if err != nil { + return nil, err + } + options.mountPath = base + + rootdir := path.Dir(base) + + if options.fsName == "" { + err = checkRootdirFs(rootdir) + if err != nil { + return nil, err + } + } + + if options.fsName == "" { + options.fsName, err = lookupZfsDataset(rootdir) + if err != nil { + return nil, err + } + } + + zfs.SetLogger(new(Logger)) + + filesystems, err := zfs.Filesystems(options.fsName) + if err != nil { + return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) + } + + filesystemsCache := make(map[string]bool, len(filesystems)) + var rootDataset *zfs.Dataset + for _, fs := range filesystems { + if fs.Name == options.fsName { + rootDataset = fs + } + filesystemsCache[fs.Name] = true + } + + if rootDataset == nil { + return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(opt.UIDMaps, opt.GIDMaps) + if err != nil { + return nil, fmt.Errorf("Failed to get root uid/gid: %v", err) + } + if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { + return nil, fmt.Errorf("Failed to create '%s': %v", base, err) + } + + d := &Driver{ + dataset: rootDataset, + options: options, + filesystemsCache: filesystemsCache, + uidMaps: opt.UIDMaps, + gidMaps: opt.GIDMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil +} + +func parseOptions(opt []string) (zfsOptions, error) { + var options zfsOptions + options.fsName = "" + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "zfs.fsname": + options.fsName = val + case "zfs.mountopt": + options.mountOptions = val + default: + return options, fmt.Errorf("Unknown option %s", key) + } + } + return options, nil +} + +func lookupZfsDataset(rootdir string) (string, error) { + var stat unix.Stat_t + if err := unix.Stat(rootdir, &stat); err != nil { + return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + wantedDev := stat.Dev + + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + for _, m := range mounts { + if err := unix.Stat(m.Mountpoint, &stat); err != nil { + logrus.WithField("storage-driver", "zfs").Debugf("failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) + continue // may fail on fuse file systems + } + + if stat.Dev == wantedDev && m.FSType == "zfs" { + return m.Source, nil + } + } + + return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) +} + +// Driver holds information about the driver, such as zfs dataset, options and cache. 
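+//
+// Access to filesystemsCache is guarded by the embedded Mutex; a typical
+// lookup (compare Exists further down in this file) is sketched as:
+//
+//	d.Lock()
+//	cached := d.filesystemsCache[d.zfsPath(id)]
+//	d.Unlock()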
+type Driver struct {
+	dataset          *zfs.Dataset
+	options          zfsOptions
+	sync.Mutex       // protects filesystem cache against concurrent access
+	filesystemsCache map[string]bool
+	uidMaps          []idtools.IDMap
+	gidMaps          []idtools.IDMap
+	ctr              *graphdriver.RefCounter
+}
+
+func (d *Driver) String() string {
+	return "zfs"
+}
+
+// Cleanup is called when the program exits; it is a no-op for ZFS.
+func (d *Driver) Cleanup() error {
+	return nil
+}
+
+// Status returns information about the ZFS filesystem. It returns a two-dimensional array of information
+// such as pool name, dataset name, disk usage, parent quota and compression used.
+// Currently it returns 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent',
+// 'Space Available', 'Parent Quota' and 'Compression'.
+func (d *Driver) Status() [][2]string {
+	parts := strings.Split(d.dataset.Name, "/")
+	pool, err := zfs.GetZpool(parts[0])
+
+	var poolName, poolHealth string
+	if err == nil {
+		poolName = pool.Name
+		poolHealth = pool.Health
+	} else {
+		poolName = fmt.Sprintf("error while getting pool information %v", err)
+		poolHealth = "not available"
+	}
+
+	quota := "no"
+	if d.dataset.Quota != 0 {
+		quota = strconv.FormatUint(d.dataset.Quota, 10)
+	}
+
+	return [][2]string{
+		{"Zpool", poolName},
+		{"Zpool Health", poolHealth},
+		{"Parent Dataset", d.dataset.Name},
+		{"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)},
+		{"Space Available", strconv.FormatUint(d.dataset.Avail, 10)},
+		{"Parent Quota", quota},
+		{"Compression", d.dataset.Compression},
+	}
+}
+
+// Metadata returns image/container metadata related to graph driver
+func (d *Driver) Metadata(id string) (map[string]string, error) {
+	return map[string]string{
+		"Mountpoint": d.mountPath(id),
+		"Dataset":    d.zfsPath(id),
+	}, nil
+}
+
+func (d *Driver) cloneFilesystem(name, parentName string) error {
+	snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond())
+	parentDataset := zfs.Dataset{Name: parentName}
+	snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false)
+	if err != nil {
+		return err
+	}
+
+	_, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"})
+	if err == nil {
+		d.Lock()
+		d.filesystemsCache[name] = true
+		d.Unlock()
+	}
+
+	if err != nil {
+		snapshot.Destroy(zfs.DestroyDeferDeletion)
+		return err
+	}
+	return snapshot.Destroy(zfs.DestroyDeferDeletion)
+}
+
+func (d *Driver) zfsPath(id string) string {
+	return d.options.fsName + "/" + id
+}
+
+func (d *Driver) mountPath(id string) string {
+	return path.Join(d.options.mountPath, "graph", getMountpoint(id))
+}
+
+// CreateFromTemplate creates a layer with the same contents and parent as another layer.
+func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
+	return d.Create(id, template, opts)
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+	return d.Create(id, parent, opts)
+}
+
+// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent.
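+//
+// For example (a hedged sketch; the id, parent and size are made-up values),
+// a per-layer quota can be requested via CreateOpts.StorageOpt, which
+// parseStorageOpt below translates into a ZFS quota:
+//
+//	err := d.Create("layer-id", "parent-id", &graphdriver.CreateOpts{
+//		StorageOpt: map[string]string{"size": "10G"},
+//	})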
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + err := d.create(id, parent, opts) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.zfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent, opts) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + name := d.zfsPath(id) + mountpoint := d.mountPath(id) + quota, err := parseStorageOpt(storageOpt) + if err != nil { + return err + } + if parent == "" { + var rootUID, rootGID int + var mountLabel string + if opts != nil { + rootUID, rootGID, err = idtools.GetRootUIDGID(opts.UIDs(), opts.GIDs()) + if err != nil { + return fmt.Errorf("Failed to get root uid/gid: %v", err) + } + mountLabel = opts.MountLabel + } + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + err = setQuota(name, quota) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + + if err := idtools.MkdirAllAs(mountpoint, defaultPerms, rootUID, rootGID); err != nil { + return err + } + defer func() { + if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { + logrus.Debugf("Failed to remove %s mount point %s: %v", id, mountpoint, err) + } + }() + + mountOpts := label.FormatMountLabel(d.options.mountOptions, mountLabel) + + if err := mount.Mount(name, mountpoint, "zfs", mountOpts); err != nil { + return errors.Wrap(err, "error creating zfs mount") + } + defer func() { + if err := detachUnmount(mountpoint); err != nil { + logrus.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) + } + }() + + if err := os.Chmod(mountpoint, defaultPerms); err != nil { + return errors.Wrap(err, "error setting permissions on zfs mount") + } + + // this is our first mount after creation of the filesystem, and the root dir may still have root + // permissions instead of the remapped root uid:gid (if user namespaces are enabled): + if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { + return errors.Wrapf(err, "modifying zfs mountpoint (%s) ownership", mountpoint) + } + + } + return err + } + err = d.cloneFilesystem(name, d.zfsPath(parent)) + if err == nil { + err = setQuota(name, quota) + } + return err +} + +func parseStorageOpt(storageOpt map[string]string) (string, error) { + // Read size to change the disk quota per container + for k, v := range storageOpt { + key := strings.ToLower(k) + switch key { + case "size": + return v, nil + default: + return "0", fmt.Errorf("Unknown option %s", key) + } + } + return "0", nil +} + +func setQuota(name string, quota string) error { + if quota == "0" { + return nil + } + fs, err := zfs.GetDataset(name) + if err != nil { + return err + } + return fs.SetProperty("quota", quota) +} + +// Remove deletes the dataset, filesystem and the cache for the given id. 
+func (d *Driver) Remove(id string) error { + name := d.zfsPath(id) + dataset := zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +// Get returns the mountpoint for the given id after creating the target directories if necessary. +func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) { + + mountpoint := d.mountPath(id) + if count := d.ctr.Increment(mountpoint); count > 1 { + return mountpoint, nil + } + defer func() { + if retErr != nil { + if c := d.ctr.Decrement(mountpoint); c <= 0 { + if mntErr := unix.Unmount(mountpoint, 0); mntErr != nil { + logrus.WithField("storage-driver", "zfs").Errorf("Error unmounting %v: %v", mountpoint, mntErr) + } + if rmErr := unix.Rmdir(mountpoint); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithField("storage-driver", "zfs").Debugf("Failed to remove %s: %v", id, rmErr) + } + + } + } + }() + + // In the case of a read-only mount we first mount read-write so we can set the + // correct permissions on the mount point and remount read-only afterwards. + remountReadOnly := false + mountOptions := d.options.mountOptions + if len(options.Options) > 0 { + var newOptions []string + for _, option := range options.Options { + if option == "ro" { + // Filter out read-only mount option but remember for later remounting. + remountReadOnly = true + } else { + newOptions = append(newOptions, option) + } + } + mountOptions = strings.Join(newOptions, ",") + } + + filesystem := d.zfsPath(id) + opts := label.FormatMountLabel(mountOptions, options.MountLabel) + logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, opts) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + return "", err + } + + if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil { + return "", errors.Wrap(err, "error creating zfs mount") + } + + if remountReadOnly { + opts = label.FormatMountLabel("remount,ro", options.MountLabel) + if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil { + return "", errors.Wrap(err, "error remounting zfs mount read-only") + } + } + + return mountpoint, nil +} + +// Put removes the existing mountpoint for the given id if it exists. +func (d *Driver) Put(id string) error { + mountpoint := d.mountPath(id) + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + + logger := logrus.WithField("storage-driver", "zfs") + + logger.Debugf(`unmount("%s")`, mountpoint) + + if err := detachUnmount(mountpoint); err != nil { + logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) + } + if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { + logger.Debugf("Failed to remove %s mount point %s: %v", id, mountpoint, err) + } + + return nil +} + +// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. +// For ZFS, it queries the full mount path for this ID. +func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { + return directory.Usage(d.mountPath(id)) +} + +// Exists checks to see if the cache entry exists for the given id. 
+func (d *Driver) Exists(id string) bool { + d.Lock() + defer d.Unlock() + return d.filesystemsCache[d.zfsPath(id)] +} + +// AdditionalImageStores returns additional image stores supported by the driver +func (d *Driver) AdditionalImageStores() []string { + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go new file mode 100644 index 00000000000..61a2ed871a4 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go @@ -0,0 +1,34 @@ +package zfs + +import ( + "fmt" + + "github.com/containers/storage/drivers" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +func checkRootdirFs(rootdir string) error { + var buf unix.Statfs_t + if err := unix.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ] + if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { + logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir) + return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir) + } + + return nil +} + +func getMountpoint(id string) string { + return id +} + +func detachUnmount(mountpoint string) error { + // FreeBSD's MNT_FORCE is roughly equivalent to MNT_DETACH + return unix.Unmount(mountpoint, unix.MNT_FORCE) +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go new file mode 100644 index 00000000000..44c68f394ec --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go @@ -0,0 +1,34 @@ +package zfs + +import ( + graphdriver "github.com/containers/storage/drivers" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +func checkRootdirFs(rootDir string) error { + fsMagic, err := graphdriver.GetFSMagic(rootDir) + if err != nil { + return err + } + backingFS := "unknown" + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFS = fsName + } + + if fsMagic != graphdriver.FsMagicZfs { + logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root") + return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootDir) + } + + return nil +} + +func getMountpoint(id string) string { + return id +} + +func detachUnmount(mountpoint string) error { + return unix.Unmount(mountpoint, unix.MNT_DETACH) +} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go new file mode 100644 index 00000000000..643b169bc5c --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd + +package zfs + +func checkRootdirFs(rootdir string) error { + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go new file mode 100644 index 00000000000..de6e377541c --- /dev/null +++ b/vendor/github.com/containers/storage/errors.go @@ -0,0 +1,65 @@ +package storage + +import ( + "errors" + + "github.com/containers/storage/types" +) + +var ( + // ErrContainerUnknown 
indicates that there was no container with the specified name or ID. + ErrContainerUnknown = types.ErrContainerUnknown + // ErrDigestUnknown indicates that we were unable to compute the digest of a specified item. + ErrDigestUnknown = types.ErrDigestUnknown + // ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used. + ErrDuplicateID = types.ErrDuplicateID + // ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images. + ErrDuplicateImageNames = types.ErrDuplicateImageNames + // ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers. + ErrDuplicateLayerNames = types.ErrDuplicateLayerNames + // ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used. + ErrDuplicateName = types.ErrDuplicateName + // ErrImageUnknown indicates that there was no image with the specified name or ID. + ErrImageUnknown = types.ErrImageUnknown + // ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image. + ErrImageUsedByContainer = types.ErrImageUsedByContainer + // ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information. + ErrIncompleteOptions = types.ErrIncompleteOptions + // ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty. + ErrInvalidBigDataName = types.ErrInvalidBigDataName + // ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children. + ErrLayerHasChildren = types.ErrLayerHasChildren + // ErrLayerNotMounted is returned when the requested information can only be computed for a mounted layer, and the layer is not mounted. + ErrLayerNotMounted = types.ErrLayerNotMounted + // ErrLayerUnknown indicates that there was no layer with the specified name or ID. + ErrLayerUnknown = types.ErrLayerUnknown + // ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer. + ErrLayerUsedByContainer = types.ErrLayerUsedByContainer + // ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer. + ErrLayerUsedByImage = types.ErrLayerUsedByImage + // ErrLoadError indicates that there was an initialization error. + ErrLoadError = types.ErrLoadError + // ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container. + ErrNotAContainer = types.ErrNotAContainer + // ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer. + ErrNotALayer = types.ErrNotALayer + // ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist. + ErrNotAnID = types.ErrNotAnID + // ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image. + ErrNotAnImage = types.ErrNotAnImage + // ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer. + ErrParentIsContainer = types.ErrParentIsContainer + // ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer. + ErrParentUnknown = types.ErrParentUnknown + // ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer. 
+	ErrSizeUnknown = types.ErrSizeUnknown
+	// ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents.
+	ErrStoreIsReadOnly = types.ErrStoreIsReadOnly
+	// ErrNotSupported is returned when the requested functionality is not supported.
+	ErrNotSupported = types.ErrNotSupported
+	// ErrInvalidMappings is returned when the specified mappings are invalid.
+	ErrInvalidMappings = types.ErrInvalidMappings
+	// ErrInvalidNameOperation is returned when updateName is called with an invalid operation.
+	// Internal error
+	errInvalidUpdateNameOperation = errors.New("invalid update name operation")
+)
diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go
new file mode 100644
index 00000000000..0a06a43235f
--- /dev/null
+++ b/vendor/github.com/containers/storage/idset.go
@@ -0,0 +1,265 @@
+package storage
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/google/go-intervals/intervalset"
+	"github.com/pkg/errors"
+)
+
+// idSet represents a set of integer IDs. It is stored as an ordered set of intervals.
+type idSet struct {
+	set *intervalset.ImmutableSet
+}
+
+func newIDSet(intervals []interval) *idSet {
+	s := intervalset.Empty()
+	for _, i := range intervals {
+		s.Add(intervalset.NewSet([]intervalset.Interval{i}))
+	}
+	return &idSet{set: s.ImmutableSet()}
+}
+
+// getHostIDs returns all the host ids in the id map.
+func getHostIDs(idMaps []idtools.IDMap) *idSet {
+	var intervals []interval
+	for _, m := range idMaps {
+		intervals = append(intervals, interval{start: m.HostID, end: m.HostID + m.Size})
+	}
+	return newIDSet(intervals)
+}
+
+// getContainerIDs returns all the container ids in the id map.
+func getContainerIDs(idMaps []idtools.IDMap) *idSet {
+	var intervals []interval
+	for _, m := range idMaps {
+		intervals = append(intervals, interval{start: m.ContainerID, end: m.ContainerID + m.Size})
+	}
+	return newIDSet(intervals)
+}
+
+// subtract returns the subtraction of `s` and `t`. `s` and `t` are unchanged.
+func (s *idSet) subtract(t *idSet) *idSet {
+	if s == nil || t == nil {
+		return s
+	}
+	return &idSet{set: s.set.Sub(t.set)}
+}
+
+// union returns the union of `s` and `t`. `s` and `t` are unchanged.
+func (s *idSet) union(t *idSet) *idSet {
+	if s == nil {
+		return t
+	} else if t == nil {
+		return s
+	}
+	return &idSet{set: s.set.Union(t.set)}
+}
+
+// Methods to iterate over the intervals of the idSet. intervalset doesn't provide one :-(
+
+// iterator over the idSet. Returns nil if iteration finishes.
+type iteratorFn func() *interval
+
+// cancelFn must be called exactly once unless iteratorFn returns nil, otherwise the goroutine might
+// leak.
+type cancelFn func()
+
+func (s *idSet) iterator() (iteratorFn, cancelFn) {
+	if s == nil {
+		return func() *interval { return nil }, func() {}
+	}
+	cancelCh := make(chan byte)
+	dataCh := make(chan interval)
+	go func() {
+		s.set.Intervals(func(ii intervalset.Interval) bool {
+			select {
+			case <-cancelCh:
+				return false
+			case dataCh <- ii.(interval):
+				return true
+			}
+		})
+		close(dataCh)
+	}()
+	iterator := func() *interval {
+		i, ok := <-dataCh
+		if !ok {
+			return nil
+		}
+		return &i
+	}
+	return iterator, func() { close(cancelCh) }
+}
+
+// size returns the total number of ids in the ID set.
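+//
+// For half-open intervals the count is the sum of the interval lengths; e.g. a
+// set holding [0, 3) and [10, 12) has size 5 (an illustrative example).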
+func (s *idSet) size() int { + var size int + iterator, cancel := s.iterator() + defer cancel() + for i := iterator(); i != nil; i = iterator() { + size += i.length() + } + return size +} + +// findAvailable finds the `n` ids from `s`. +func (s *idSet) findAvailable(n int) (*idSet, error) { + var intervals []intervalset.Interval + iterator, cancel := s.iterator() + defer cancel() + for i := iterator(); n > 0 && i != nil; i = iterator() { + i.end = minInt(i.end, i.start+n) + intervals = append(intervals, *i) + n -= i.length() + } + if n > 0 { + return nil, errors.New("could not find enough available IDs") + } + return &idSet{set: intervalset.NewImmutableSet(intervals)}, nil +} + +// zip creates an id map from `s` (host ids) and container ids. +func (s *idSet) zip(container *idSet) []idtools.IDMap { + hostIterator, hostCancel := s.iterator() + defer hostCancel() + containerIterator, containerCancel := container.iterator() + defer containerCancel() + var out []idtools.IDMap + for h, c := hostIterator(), containerIterator(); h != nil && c != nil; { + if n := minInt(h.length(), c.length()); n > 0 { + out = append(out, idtools.IDMap{ + ContainerID: c.start, + HostID: h.start, + Size: n, + }) + h.start += n + c.start += n + } + if h.IsZero() { + h = hostIterator() + } + if c.IsZero() { + c = containerIterator() + } + } + return out +} + +// interval represents an interval of integers [start, end). Note it is allowed to have +// start >= end, in which case it is treated as an empty interval. It implements interface +// intervalset.Interval. +type interval struct { + // Start of the interval (inclusive). + start int + // End of the interval (exclusive). + end int +} + +func (i interval) length() int { + return maxInt(0, i.end-i.start) +} + +func (i interval) Intersect(other intervalset.Interval) intervalset.Interval { + j := other.(interval) + return interval{start: maxInt(i.start, j.start), end: minInt(i.end, j.end)} +} + +func (i interval) Before(other intervalset.Interval) bool { + j := other.(interval) + return !i.IsZero() && !j.IsZero() && i.end < j.start +} + +func (i interval) IsZero() bool { + return i.length() <= 0 +} + +func (i interval) Bisect(other intervalset.Interval) (intervalset.Interval, intervalset.Interval) { + j := other.(interval) + if j.IsZero() { + return i, interval{} + } + // Subtracting [j.start, j.end) is equivalent to the union of intersecting (-inf, j.start) and + // [j.end, +inf). 
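+	// For example, bisecting [0, 10) by [3, 5) yields left = [0, 3) and
+	// right = [5, 10).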
+ left := interval{start: i.start, end: minInt(i.end, j.start)} + right := interval{start: maxInt(i.start, j.end), end: i.end} + return left, right +} + +func (i interval) Adjoin(other intervalset.Interval) intervalset.Interval { + j := other.(interval) + if !i.IsZero() && !j.IsZero() && (i.end == j.start || j.end == i.start) { + return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)} + } + return interval{} +} + +func (i interval) Encompass(other intervalset.Interval) intervalset.Interval { + j := other.(interval) + switch { + case i.IsZero(): + return j + case j.IsZero(): + return i + default: + return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)} + } +} + +func minInt(a, b int) int { + if a < b { + return a + } + return b +} + +func maxInt(a, b int) int { + if a < b { + return b + } + return a +} + +func hasOverlappingRanges(mappings []idtools.IDMap) error { + hostIntervals := intervalset.Empty() + containerIntervals := intervalset.Empty() + + var conflicts []string + + for _, m := range mappings { + c := interval{start: m.ContainerID, end: m.ContainerID + m.Size} + h := interval{start: m.HostID, end: m.HostID + m.Size} + + added := false + overlaps := false + + containerIntervals.IntervalsBetween(c, func(x intervalset.Interval) bool { + overlaps = true + return false + }) + if overlaps { + conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size)) + added = true + } + containerIntervals.Add(intervalset.NewSet([]intervalset.Interval{c})) + + hostIntervals.IntervalsBetween(h, func(x intervalset.Interval) bool { + overlaps = true + return false + }) + if overlaps && !added { + conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size)) + } + hostIntervals.Add(intervalset.NewSet([]intervalset.Interval{h})) + } + + if conflicts != nil { + if len(conflicts) == 1 { + return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mapping %s conflicts with other mappings", conflicts[0]) + } + return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mappings %s conflict with other mappings", strings.Join(conflicts, ", ")) + } + return nil +} diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go new file mode 100644 index 00000000000..a4c3ed22c75 --- /dev/null +++ b/vendor/github.com/containers/storage/images.go @@ -0,0 +1,842 @@ +package storage + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/stringutils" + "github.com/containers/storage/pkg/truncindex" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +const ( + // ImageDigestManifestBigDataNamePrefix is a prefix of big data item + // names which we consider to be manifests, used for computing a + // "digest" value for the image as a whole, by which we can locate the + // image later. + ImageDigestManifestBigDataNamePrefix = "manifest" + // ImageDigestBigDataKey is provided for compatibility with older + // versions of the image library. It will be removed in the future. + ImageDigestBigDataKey = "manifest" +) + +// An Image is a reference to a layer and an associated metadata string. +type Image struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. 
+ ID string `json:"id"` + + // Digest is a digest value that we can use to locate the image, if one + // was specified at creation-time. + Digest digest.Digest `json:"digest,omitempty"` + + // Digests is a list of digest values of the image's manifests, and + // possibly a manually-specified value, that we can use to locate the + // image. If Digest is set, its value is also in this list. + Digests []digest.Digest `json:"-"` + + // Names is an optional set of user-defined convenience values. The + // image can be referred to by its ID or any of its names. Names are + // unique among images, and are often the text representation of tagged + // or canonical references. + Names []string `json:"names,omitempty"` + + // NamesHistory is an optional set of Names the image had in the past. The + // contained names are free from any duplicates, whereas the newest entry + // is the first one. + NamesHistory []string `json:"names-history,omitempty"` + + // TopLayer is the ID of the topmost layer of the image itself, if the + // image contains one or more layers. Multiple images can refer to the + // same top layer. + TopLayer string `json:"layer,omitempty"` + + // MappedTopLayers are the IDs of alternate versions of the top layer + // which have the same contents and parent, and which differ from + // TopLayer only in which ID mappings they use. When the image is + // to be removed, they should be removed before the TopLayer, as the + // graph driver may depend on that. + MappedTopLayers []string `json:"mapped-layers,omitempty"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // BigDataNames is a list of names of data items that we keep for the + // convenience of the caller. They can be large, and are only in + // memory when being read from or written to disk. + BigDataNames []string `json:"big-data-names,omitempty"` + + // BigDataSizes maps the names in BigDataNames to the sizes of the data + // that has been stored, if they're known. + BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + + // BigDataDigests maps the names in BigDataNames to the digests of the + // data that has been stored, if they're known. + BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` + + // Created is the datestamp for when this image was created. Older + // versions of the library did not track this information, so callers + // will likely want to use the IsZero() method to verify that a value + // is set before using it. + Created time.Time `json:"created,omitempty"` + + // ReadOnly is true if this image resides in a read-only layer store. + ReadOnly bool `json:"-"` + + Flags map[string]interface{} `json:"flags,omitempty"` +} + +// ROImageStore provides bookkeeping for information about Images. +type ROImageStore interface { + ROFileBasedStore + ROMetadataStore + ROBigDataStore + + // Exists checks if there is an image with the given ID or name. + Exists(id string) bool + + // Get retrieves information about an image given an ID or name. + Get(id string) (*Image, error) + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. + Lookup(name string) (string, error) + + // Images returns a slice enumerating the known images. 
+ Images() ([]Image, error) + + // ByDigest returns a slice enumerating the images which have either an + // explicitly-set digest, or a big data item with a name that starts + // with ImageDigestManifestBigDataNamePrefix, which matches the + // specified digest. + ByDigest(d digest.Digest) ([]*Image, error) +} + +// ImageStore provides bookkeeping for information about Images. +type ImageStore interface { + ROImageStore + RWFileBasedStore + RWMetadataStore + RWImageBigDataStore + FlaggableStore + + // Create creates an image that has a specified ID (or a random one) and + // optional names, using the specified layer as its topmost (hopefully + // read-only) layer. That layer can be referenced by multiple images. + Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error) + + // SetNames replaces the list of names associated with an image with the + // supplied values. The values are expected to be valid normalized + // named image references. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. + SetNames(id string, names []string) error + + // AddNames adds the supplied values to the list of names associated with the image with + // the specified id. The values are expected to be valid normalized + // named image references. + AddNames(id string, names []string) error + + // RemoveNames removes the supplied values from the list of names associated with the image with + // the specified id. The values are expected to be valid normalized + // named image references. + RemoveNames(id string, names []string) error + + // Delete removes the record of the image. + Delete(id string) error + + // Wipe removes records of all images. + Wipe() error +} + +type imageStore struct { + lockfile Locker + dir string + images []*Image + idindex *truncindex.TruncIndex + byid map[string]*Image + byname map[string]*Image + bydigest map[digest.Digest][]*Image + loadMut sync.Mutex +} + +func copyImage(i *Image) *Image { + return &Image{ + ID: i.ID, + Digest: i.Digest, + Digests: copyDigestSlice(i.Digests), + Names: copyStringSlice(i.Names), + NamesHistory: copyStringSlice(i.NamesHistory), + TopLayer: i.TopLayer, + MappedTopLayers: copyStringSlice(i.MappedTopLayers), + Metadata: i.Metadata, + BigDataNames: copyStringSlice(i.BigDataNames), + BigDataSizes: copyStringInt64Map(i.BigDataSizes), + BigDataDigests: copyStringDigestMap(i.BigDataDigests), + Created: i.Created, + ReadOnly: i.ReadOnly, + Flags: copyStringInterfaceMap(i.Flags), + } +} + +func copyImageSlice(slice []*Image) []*Image { + if len(slice) > 0 { + cp := make([]*Image, len(slice)) + for i := range slice { + cp[i] = copyImage(slice[i]) + } + return cp + } + return nil +} + +func (r *imageStore) Images() ([]Image, error) { + images := make([]Image, len(r.images)) + for i := range r.images { + images[i] = *copyImage(r.images[i]) + } + return images, nil +} + +func (r *imageStore) imagespath() string { + return filepath.Join(r.dir, "images.json") +} + +func (r *imageStore) datadir(id string) string { + return filepath.Join(r.dir, id) +} + +func (r *imageStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +// bigDataNameIsManifest determines if a big data item with the specified name +// is considered to be representative of the image, in that its digest can be +// said to also be the image's digest. Currently, if its name is, or begins +// with, "manifest", we say that it is. 
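+//
+// Illustrative cases (the names are examples, not a fixed list):
+//
+//	bigDataNameIsManifest("manifest")       // true
+//	bigDataNameIsManifest("manifest-list")  // true
+//	bigDataNameIsManifest("config")         // false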
+func bigDataNameIsManifest(name string) bool { + return strings.HasPrefix(name, ImageDigestManifestBigDataNamePrefix) +} + +// recomputeDigests takes a fixed digest and a name-to-digest map and builds a +// list of the unique values that would identify the image. +func (i *Image) recomputeDigests() error { + validDigests := make([]digest.Digest, 0, len(i.BigDataDigests)+1) + digests := make(map[digest.Digest]struct{}) + if i.Digest != "" { + if err := i.Digest.Validate(); err != nil { + return errors.Wrapf(err, "error validating image digest %q", string(i.Digest)) + } + digests[i.Digest] = struct{}{} + validDigests = append(validDigests, i.Digest) + } + for name, digest := range i.BigDataDigests { + if !bigDataNameIsManifest(name) { + continue + } + if digest.Validate() != nil { + return errors.Wrapf(digest.Validate(), "error validating digest %q for big data item %q", string(digest), name) + } + // Deduplicate the digest values. + if _, known := digests[digest]; !known { + digests[digest] = struct{}{} + validDigests = append(validDigests, digest) + } + } + if i.Digest == "" && len(validDigests) > 0 { + i.Digest = validDigests[0] + } + i.Digests = validDigests + return nil +} + +func (r *imageStore) Load() error { + shouldSave := false + rpath := r.imagespath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + images := []*Image{} + idlist := []string{} + ids := make(map[string]*Image) + names := make(map[string]*Image) + digests := make(map[digest.Digest][]*Image) + if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(images)) + for n, image := range images { + ids[image.ID] = images[n] + idlist = append(idlist, image.ID) + for _, name := range image.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + shouldSave = true + } + } + // Compute the digest list. 
+ err = image.recomputeDigests() + if err != nil { + return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names) + } + for _, name := range image.Names { + names[name] = image + } + for _, digest := range image.Digests { + list := digests[digest] + digests[digest] = append(list, image) + } + image.ReadOnly = !r.IsReadWrite() + } + } + if shouldSave && (!r.IsReadWrite() || !r.Locked()) { + return ErrDuplicateImageNames + } + r.images = images + r.idindex = truncindex.NewTruncIndex(idlist) + r.byid = ids + r.byname = names + r.bydigest = digests + if shouldSave { + return r.Save() + } + return nil +} + +func (r *imageStore) Save() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath()) + } + if !r.Locked() { + return errors.New("image store is not locked for writing") + } + rpath := r.imagespath() + if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + return err + } + jdata, err := json.Marshal(&r.images) + if err != nil { + return err + } + defer r.Touch() + return ioutils.AtomicWriteFile(rpath, jdata, 0600) +} + +func newImageStore(dir string) (ImageStore, error) { + if err := os.MkdirAll(dir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(dir, "images.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + istore := imageStore{ + lockfile: lockfile, + dir: dir, + images: []*Image{}, + byid: make(map[string]*Image), + byname: make(map[string]*Image), + bydigest: make(map[digest.Digest][]*Image), + } + if err := istore.Load(); err != nil { + return nil, err + } + return &istore, nil +} + +func newROImageStore(dir string) (ROImageStore, error) { + lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock")) + if err != nil { + return nil, err + } + lockfile.RLock() + defer lockfile.Unlock() + istore := imageStore{ + lockfile: lockfile, + dir: dir, + images: []*Image{}, + byid: make(map[string]*Image), + byname: make(map[string]*Image), + bydigest: make(map[digest.Digest][]*Image), + } + if err := istore.Load(); err != nil { + return nil, err + } + return &istore, nil +} + +func (r *imageStore) lookup(id string) (*Image, bool) { + if image, ok := r.byid[id]; ok { + return image, ok + } else if image, ok := r.byname[id]; ok { + return image, ok + } else if longid, err := r.idindex.Get(id); err == nil { + image, ok := r.byid[longid] + return image, ok + } + return nil, false +} + +func (r *imageStore) ClearFlag(id string, flag string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath()) + } + image, ok := r.lookup(id) + if !ok { + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + delete(image.Flags, flag) + return r.Save() +} + +func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath()) + } + image, ok := r.lookup(id) + if !ok { + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + if image.Flags == nil { + image.Flags = make(map[string]interface{}) + } + image.Flags[flag] = value + return r.Save() +} + +func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) { + if !r.IsReadWrite() { + return 
nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath()) + } + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id) + } + names = dedupeNames(names) + for _, name := range names { + if image, nameInUse := r.byname[name]; nameInUse { + return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID) + } + } + if created.IsZero() { + created = time.Now().UTC() + } + + image = &Image{ + ID: id, + Digest: searchableDigest, + Digests: nil, + Names: names, + TopLayer: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: created, + Flags: make(map[string]interface{}), + } + err = image.recomputeDigests() + if err != nil { + return nil, errors.Wrapf(err, "error validating digests for new image") + } + r.images = append(r.images, image) + r.idindex.Add(id) + r.byid[id] = image + for _, name := range names { + r.byname[name] = image + } + for _, digest := range image.Digests { + list := r.bydigest[digest] + r.bydigest[digest] = append(list, image) + } + err = r.Save() + image = copyImage(image) + return image, err +} + +func (r *imageStore) addMappedTopLayer(id, layer string) error { + if image, ok := r.lookup(id); ok { + image.MappedTopLayers = append(image.MappedTopLayers, layer) + return r.Save() + } + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +func (r *imageStore) removeMappedTopLayer(id, layer string) error { + if image, ok := r.lookup(id); ok { + initialLen := len(image.MappedTopLayers) + image.MappedTopLayers = stringutils.RemoveFromSlice(image.MappedTopLayers, layer) + // No layer was removed. No need to save. + if initialLen == len(image.MappedTopLayers) { + return nil + } + return r.Save() + } + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +func (r *imageStore) Metadata(id string) (string, error) { + if image, ok := r.lookup(id); ok { + return image.Metadata, nil + } + return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +func (r *imageStore) SetMetadata(id, metadata string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify image metadata at %q", r.imagespath()) + } + if image, ok := r.lookup(id); ok { + image.Metadata = metadata + return r.Save() + } + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +func (r *imageStore) removeName(image *Image, name string) { + image.Names = stringSliceWithoutValue(image.Names, name) +} + +func (i *Image) addNameToHistory(name string) { + i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...)) +} + +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. 
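+//
+// A sketch of the preferred replacements (the id and name values below are
+// illustrative):
+//
+//	err := r.AddNames("image-id", []string{"example.com/repo/app:latest"})
+//	err = r.RemoveNames("image-id", []string{"example.com/repo/app:old"})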
+func (r *imageStore) SetNames(id string, names []string) error { + return r.updateNames(id, names, setNames) +} + +func (r *imageStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *imageStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath()) + } + image, ok := r.lookup(id) + if !ok { + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + oldNames := image.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherImage, ok := r.byname[name]; ok { + r.removeName(otherImage, name) + } + r.byname[name] = image + image.addNameToHistory(name) + } + image.Names = names + return r.Save() +} + +func (r *imageStore) Delete(id string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) + } + image, ok := r.lookup(id) + if !ok { + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + id = image.ID + toDeleteIndex := -1 + for i, candidate := range r.images { + if candidate.ID == id { + toDeleteIndex = i + } + } + delete(r.byid, id) + r.idindex.Delete(id) + for _, name := range image.Names { + delete(r.byname, name) + } + for _, digest := range image.Digests { + prunedList := imageSliceWithoutValue(r.bydigest[digest], image) + if len(prunedList) == 0 { + delete(r.bydigest, digest) + } else { + r.bydigest[digest] = prunedList + } + } + if toDeleteIndex != -1 { + // delete the image at toDeleteIndex + if toDeleteIndex == len(r.images)-1 { + r.images = r.images[:len(r.images)-1] + } else { + r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...) 
+		}
+	}
+	if err := r.Save(); err != nil {
+		return err
+	}
+	if err := os.RemoveAll(r.datadir(id)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *imageStore) Get(id string) (*Image, error) {
+	if image, ok := r.lookup(id); ok {
+		return copyImage(image), nil
+	}
+	return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+}
+
+func (r *imageStore) Lookup(name string) (id string, err error) {
+	if image, ok := r.lookup(name); ok {
+		return image.ID, nil
+	}
+	return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", name)
+}
+
+func (r *imageStore) Exists(id string) bool {
+	_, ok := r.lookup(id)
+	return ok
+}
+
+func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
+	if images, ok := r.bydigest[d]; ok {
+		return copyImageSlice(images), nil
+	}
+	return nil, errors.Wrapf(ErrImageUnknown, "error locating image with digest %q", d)
+}
+
+func (r *imageStore) BigData(id, key string) ([]byte, error) {
+	if key == "" {
+		return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name")
+	}
+	image, ok := r.lookup(id)
+	if !ok {
+		return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+	}
+	return ioutil.ReadFile(r.datapath(image.ID, key))
+}
+
+func (r *imageStore) BigDataSize(id, key string) (int64, error) {
+	if key == "" {
+		return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name")
+	}
+	image, ok := r.lookup(id)
+	if !ok {
+		return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+	}
+	if image.BigDataSizes == nil {
+		image.BigDataSizes = make(map[string]int64)
+	}
+	if size, ok := image.BigDataSizes[key]; ok {
+		return size, nil
+	}
+	if data, err := r.BigData(id, key); err == nil && data != nil {
+		return int64(len(data)), nil
+	}
+	return -1, ErrSizeUnknown
+}
+
+func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
+	if key == "" {
+		return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name")
+	}
+	image, ok := r.lookup(id)
+	if !ok {
+		return "", errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+	}
+	if image.BigDataDigests == nil {
+		image.BigDataDigests = make(map[string]digest.Digest)
+	}
+	if d, ok := image.BigDataDigests[key]; ok {
+		return d, nil
+	}
+	return "", ErrDigestUnknown
+}
+
+func (r *imageStore) BigDataNames(id string) ([]string, error) {
+	image, ok := r.lookup(id)
+	if !ok {
+		return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+	}
+	return copyStringSlice(image.BigDataNames), nil
+}
+
+func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
+	modified := make([]*Image, 0, len(slice))
+	for _, v := range slice {
+		if v == value {
+			continue
+		}
+		modified = append(modified, v)
+	}
+	return modified
+}
+
+func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
+	if key == "" {
+		return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
+	}
+	if !r.IsReadWrite() {
+		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath())
+	}
+	image, ok := r.lookup(id)
+	if !ok {
+		return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+	}
+	err := os.MkdirAll(r.datadir(image.ID), 0700)
+	if err != nil {
+		return err
+	}
+	var newDigest digest.Digest
+	if bigDataNameIsManifest(key)
{ + if digestManifest == nil { + return errors.Wrapf(ErrDigestUnknown, "error digesting manifest: no manifest digest callback provided") + } + if newDigest, err = digestManifest(data); err != nil { + return errors.Wrapf(err, "error digesting manifest") + } + } else { + newDigest = digest.Canonical.FromBytes(data) + } + err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600) + if err == nil { + save := false + if image.BigDataSizes == nil { + image.BigDataSizes = make(map[string]int64) + } + oldSize, sizeOk := image.BigDataSizes[key] + image.BigDataSizes[key] = int64(len(data)) + if image.BigDataDigests == nil { + image.BigDataDigests = make(map[string]digest.Digest) + } + oldDigest, digestOk := image.BigDataDigests[key] + image.BigDataDigests[key] = newDigest + if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest { + save = true + } + addName := true + for _, name := range image.BigDataNames { + if name == key { + addName = false + break + } + } + if addName { + image.BigDataNames = append(image.BigDataNames, key) + save = true + } + for _, oldDigest := range image.Digests { + // remove the image from the list of images in the digest-based index + if list, ok := r.bydigest[oldDigest]; ok { + prunedList := imageSliceWithoutValue(list, image) + if len(prunedList) == 0 { + delete(r.bydigest, oldDigest) + } else { + r.bydigest[oldDigest] = prunedList + } + } + } + if err = image.recomputeDigests(); err != nil { + return errors.Wrapf(err, "error recomputing image digest information for %s", image.ID) + } + for _, newDigest := range image.Digests { + // add the image to the list of images in the digest-based index which + // corresponds to the new digest for this item, unless it's already there + list := r.bydigest[newDigest] + if len(list) == len(imageSliceWithoutValue(list, image)) { + // the list isn't shortened by trying to prune this image from it, + // so it's not in there yet + r.bydigest[newDigest] = append(list, image) + } + } + if save { + err = r.Save() + } + } + return err +} + +func (r *imageStore) Wipe() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) + } + ids := make([]string, 0, len(r.byid)) + for id := range r.byid { + ids = append(ids, id) + } + for _, id := range ids { + if err := r.Delete(id); err != nil { + return err + } + } + return nil +} + +func (r *imageStore) Lock() { + r.lockfile.Lock() +} + +func (r *imageStore) RecursiveLock() { + r.lockfile.RecursiveLock() +} + +func (r *imageStore) RLock() { + r.lockfile.RLock() +} + +func (r *imageStore) Unlock() { + r.lockfile.Unlock() +} + +func (r *imageStore) Touch() error { + return r.lockfile.Touch() +} + +func (r *imageStore) Modified() (bool, error) { + return r.lockfile.Modified() +} + +func (r *imageStore) IsReadWrite() bool { + return r.lockfile.IsReadWrite() +} + +func (r *imageStore) TouchedSince(when time.Time) bool { + return r.lockfile.TouchedSince(when) +} + +func (r *imageStore) Locked() bool { + return r.lockfile.Locked() +} + +func (r *imageStore) ReloadIfChanged() error { + r.loadMut.Lock() + defer r.loadMut.Unlock() + + modified, err := r.Modified() + if err == nil && modified { + return r.Load() + } + return err +} diff --git a/vendor/github.com/containers/storage/jsoniter.go b/vendor/github.com/containers/storage/jsoniter.go new file mode 100644 index 00000000000..7dd6388d7f3 --- /dev/null +++ b/vendor/github.com/containers/storage/jsoniter.go @@ -0,0 +1,5 @@ +package
storage + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go new file mode 100644 index 00000000000..bba8d7588f7 --- /dev/null +++ b/vendor/github.com/containers/storage/layers.go @@ -0,0 +1,1967 @@ +package storage + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + "time" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/pkg/tarlog" + "github.com/containers/storage/pkg/truncindex" + multierror "github.com/hashicorp/go-multierror" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/archive/tar" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +const ( + tarSplitSuffix = ".tar-split.gz" + incompleteFlag = "incomplete" +) + +// A Layer is a record of a copy-on-write layer that's stored by the lower +// level graph driver. +type Layer struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Names is an optional set of user-defined convenience values. The + // layer can be referred to by its ID or any of its names. Names are + // unique among layers. + Names []string `json:"names,omitempty"` + + // Parent is the ID of a layer from which this layer inherits data. + Parent string `json:"parent,omitempty"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // MountLabel is an SELinux label which should be used when attempting to mount + // the layer. + MountLabel string `json:"mountlabel,omitempty"` + + // MountPoint is the path where the layer is mounted, or where it was most + // recently mounted. This can change between subsequent Unmount() and + // Mount() calls, so the caller should consult this value after Mount() + // succeeds to find the location of the container's root filesystem. + MountPoint string `json:"-"` + + // MountCount is used as a reference count for the container's layer being + // mounted at the mount point. + MountCount int `json:"-"` + + // Created is the datestamp for when this layer was created. Older + // versions of the library did not track this information, so callers + // will likely want to use the IsZero() method to verify that a value + // is set before using it. + Created time.Time `json:"created,omitempty"` + + // CompressedDigest is the digest of the blob that was last passed to + // ApplyDiff() or Put(), as it was presented to us. + CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"` + + // CompressedSize is the length of the blob that was last passed to + // ApplyDiff() or Put(), as it was presented to us. If + // CompressedDigest is not set, this should be treated as if it were an + // uninitialized value. 
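+ // Callers can treat a non-empty CompressedDigest as the signal that + // CompressedSize is meaningful; Size() applies the same convention to the + // uncompressed pair.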
+ CompressedSize int64 `json:"compressed-size,omitempty"` + + // UncompressedDigest is the digest of the blob that was last passed to + // ApplyDiff() or Put(), after we decompressed it. Often referred to + // as a DiffID. + UncompressedDigest digest.Digest `json:"diff-digest,omitempty"` + + // UncompressedSize is the length of the blob that was last passed to + // ApplyDiff() or Put(), after we decompressed it. If + // UncompressedDigest is not set, this should be treated as if it were + // an uninitialized value. + UncompressedSize int64 `json:"diff-size,omitempty"` + + // CompressionType is the type of compression which we detected on the blob + // that was last passed to ApplyDiff() or Put(). + CompressionType archive.Compression `json:"compression,omitempty"` + + // UIDs and GIDs are lists of UIDs and GIDs used in the layer. This + // field is only populated (i.e., will only contain one or more + // entries) if the layer was created using ApplyDiff() or Put(). + UIDs []uint32 `json:"uidset,omitempty"` + GIDs []uint32 `json:"gidset,omitempty"` + + // Flags is arbitrary data about the layer. + Flags map[string]interface{} `json:"flags,omitempty"` + + // UIDMap and GIDMap are used for setting up a layer's contents + // for use inside of a user namespace where UID mapping is being used. + UIDMap []idtools.IDMap `json:"uidmap,omitempty"` + GIDMap []idtools.IDMap `json:"gidmap,omitempty"` + + // ReadOnly is true if this layer resides in a read-only layer store. + ReadOnly bool `json:"-"` + + // BigDataNames is a list of names of data items that we keep for the + // convenience of the caller. They can be large, and are only in + // memory when being read from or written to disk. + BigDataNames []string `json:"big-data-names,omitempty"` +} + +type layerMountPoint struct { + ID string `json:"id"` + MountPoint string `json:"path"` + MountCount int `json:"count"` +} + +// DiffOptions override the default behavior of Diff() methods. +type DiffOptions struct { + // Compression, if set overrides the default compressor when generating a diff. + Compression *archive.Compression +} + +// ROLayerStore wraps a graph driver, adding the ability to refer to layers by +// name, and keeping track of parent-child relationships, along with a list of +// all known layers. +type ROLayerStore interface { + ROFileBasedStore + ROMetadataStore + ROLayerBigDataStore + + // Exists checks if a layer with the specified name or ID is known. + Exists(id string) bool + + // Get retrieves information about a layer given an ID or name. + Get(id string) (*Layer, error) + + // Status returns an slice of key-value pairs, suitable for human consumption, + // relaying whatever status information the underlying driver can share. + Status() ([][2]string, error) + + // Changes returns a slice of Change structures, which contain a pathname + // (Path) and a description of what sort of change (Kind) was made by the + // layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a + // specified layer. By default, the layer's parent is used as a reference. + Changes(from, to string) ([]archive.Change, error) + + // Diff produces a tarstream which can be applied to a layer with the contents + // of the first layer to produce a layer with the contents of the second layer. + // By default, the parent of the second layer is used as the first + // layer, so it need not be specified. Options can be used to override + // default behavior, but are also not required. 
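+ // When options is nil, or options.Compression is nil, implementations fall + // back to the compression type that was recorded for the layer when its + // diff was originally applied.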
+ Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) + + // DiffSize produces an estimate of the length of the tarstream which would be + // produced by Diff. + DiffSize(from, to string) (int64, error) + + // Size produces a cached value for the uncompressed size of the layer, + // if one is known, or -1 if it is not known. If the layer can not be + // found, it returns an error. + Size(name string) (int64, error) + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. + Lookup(name string) (string, error) + + // LayersByCompressedDigest returns a slice of the layers with the + // specified compressed digest value recorded for them. + LayersByCompressedDigest(d digest.Digest) ([]Layer, error) + + // LayersByUncompressedDigest returns a slice of the layers with the + // specified uncompressed digest value recorded for them. + LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) + + // Layers returns a slice of the known layers. + Layers() ([]Layer, error) +} + +// LayerStore wraps a graph driver, adding the ability to refer to layers by +// name, and keeping track of parent-child relationships, along with a list of +// all known layers. +type LayerStore interface { + ROLayerStore + RWFileBasedStore + RWMetadataStore + FlaggableStore + RWLayerBigDataStore + + // Create creates a new layer, optionally giving it a specified ID rather than + // a randomly-generated one, either inheriting data from another specified + // layer or the empty base layer. The new layer can optionally be given names + // and have an SELinux label specified for use when mounting it. Some + // underlying drivers can accept a "size" option. At this time, most + // underlying drivers do not themselves distinguish between writeable + // and read-only layers. + Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error) + + // CreateWithFlags combines the functions of Create and SetFlag. + CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) + + // Put combines the functions of CreateWithFlags and ApplyDiff. + Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) + + // SetNames replaces the list of names associated with a layer with the + // supplied values. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. + SetNames(id string, names []string) error + + // AddNames adds the supplied values to the list of names associated with the layer with the + // specified id. + AddNames(id string, names []string) error + + // RemoveNames remove the supplied values from the list of names associated with the layer with the + // specified id. + RemoveNames(id string, names []string) error + + // Delete deletes a layer with the specified name or ID. + Delete(id string) error + + // Wipe deletes all layers. + Wipe() error + + // Mount mounts a layer for use. If the specified layer is the parent of other + // layers, it should not be written to. An SELinux label to be applied to the + // mount can be specified to override the one configured for the layer. + // The mappings used by the container can be specified. 
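+ // Mounts are reference-counted: each successful Mount increments the + // layer's MountCount, and the layer is only truly unmounted once a + // matching number of Unmount calls (or a forced unmount) brings the count + // back down.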
+ Mount(id string, options drivers.MountOpts) (string, error) + + // Unmount unmounts a layer when it is no longer in use. + Unmount(id string, force bool) (bool, error) + + // Mounted returns number of times the layer has been mounted. + Mounted(id string) (int, error) + + // ParentOwners returns the UIDs and GIDs of parents of the layer's mountpoint + // for which the layer's UID and GID maps don't contain corresponding entries. + ParentOwners(id string) (uids, gids []int, err error) + + // ApplyDiff reads a tarstream which was created by a previous call to Diff and + // applies its changes to a specified layer. + ApplyDiff(to string, diff io.Reader) (int64, error) + + // ApplyDiffWithDiffer applies the changes through the differ callback function. + // If to is the empty string, then a staging directory is created by the driver. + ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + + // CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors + CleanupStagingDirectory(stagingDirectory string) error + + // ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff. + ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error + + // DifferTarget gets the location where files are stored for the layer. + DifferTarget(id string) (string, error) + + // LoadLocked wraps Load in a locked state. This means it loads the store + // and cleans-up invalid layers if needed. + LoadLocked() error + + // PutAdditionalLayer creates a layer using the diff contained in the additional layer + // store. + // This API is experimental and can be changed without bumping the major version number. 
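+ // The names recorded for the new layer are the ones supplied by the + // caller; names carried by the additional layer store itself are ignored.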
+ PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) +} + +type layerStore struct { + lockfile Locker + mountsLockfile Locker + rundir string + driver drivers.Driver + layerdir string + layers []*Layer + idindex *truncindex.TruncIndex + byid map[string]*Layer + byname map[string]*Layer + bymount map[string]*Layer + bycompressedsum map[digest.Digest][]string + byuncompressedsum map[digest.Digest][]string + uidMap []idtools.IDMap + gidMap []idtools.IDMap + loadMut sync.Mutex + layerspathModified time.Time +} + +func copyLayer(l *Layer) *Layer { + return &Layer{ + ID: l.ID, + Names: copyStringSlice(l.Names), + Parent: l.Parent, + Metadata: l.Metadata, + MountLabel: l.MountLabel, + MountPoint: l.MountPoint, + MountCount: l.MountCount, + Created: l.Created, + CompressedDigest: l.CompressedDigest, + CompressedSize: l.CompressedSize, + UncompressedDigest: l.UncompressedDigest, + UncompressedSize: l.UncompressedSize, + CompressionType: l.CompressionType, + ReadOnly: l.ReadOnly, + BigDataNames: copyStringSlice(l.BigDataNames), + Flags: copyStringInterfaceMap(l.Flags), + UIDMap: copyIDMap(l.UIDMap), + GIDMap: copyIDMap(l.GIDMap), + UIDs: copyUint32Slice(l.UIDs), + GIDs: copyUint32Slice(l.GIDs), + } +} + +func (r *layerStore) Layers() ([]Layer, error) { + layers := make([]Layer, len(r.layers)) + for i := range r.layers { + layers[i] = *copyLayer(r.layers[i]) + } + return layers, nil +} + +func (r *layerStore) mountspath() string { + return filepath.Join(r.rundir, "mountpoints.json") +} + +func (r *layerStore) layerspath() string { + return filepath.Join(r.layerdir, "layers.json") +} + +func (r *layerStore) Load() error { + shouldSave := false + rpath := r.layerspath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + layers := []*Layer{} + idlist := []string{} + ids := make(map[string]*Layer) + names := make(map[string]*Layer) + compressedsums := make(map[digest.Digest][]string) + uncompressedsums := make(map[digest.Digest][]string) + if r.IsReadWrite() { + label.ClearLabels() + } + if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil { + idlist = make([]string, 0, len(layers)) + for n, layer := range layers { + ids[layer.ID] = layers[n] + idlist = append(idlist, layer.ID) + for _, name := range layer.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + shouldSave = true + } + names[name] = layers[n] + } + if layer.CompressedDigest != "" { + compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID) + } + if layer.UncompressedDigest != "" { + uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID) + } + if layer.MountLabel != "" { + label.ReserveLabel(layer.MountLabel) + } + layer.ReadOnly = !r.IsReadWrite() + } + err = nil + } + if shouldSave && (!r.IsReadWrite() || !r.Locked()) { + return ErrDuplicateLayerNames + } + r.layers = layers + r.idindex = truncindex.NewTruncIndex(idlist) + r.byid = ids + r.byname = names + r.bycompressedsum = compressedsums + r.byuncompressedsum = uncompressedsums + + // Load and merge information about which layers are mounted, and where. 
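+ // Mount state lives in mountpoints.json under the run directory, guarded + // by its own lockfile, so it can change independently of the layer records + // kept in layers.json.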
+ if r.IsReadWrite() { + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + if err = r.loadMounts(); err != nil { + return err + } + + // Last step: as we’re writable, try to remove anything that a previous + // user of this storage area marked for deletion but didn't manage to + // actually delete. + if r.Locked() { + for _, layer := range r.layers { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + if layerHasIncompleteFlag(layer) { + logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID) + err = r.deleteInternal(layer.ID) + if err != nil { + break + } + shouldSave = true + } + } + } + if shouldSave { + return r.saveLayers() + } + } + + return err +} + +func (r *layerStore) LoadLocked() error { + r.lockfile.Lock() + defer r.lockfile.Unlock() + return r.Load() +} + +func (r *layerStore) loadMounts() error { + mounts := make(map[string]*Layer) + mpath := r.mountspath() + data, err := ioutil.ReadFile(mpath) + if err != nil && !os.IsNotExist(err) { + return err + } + layerMounts := []layerMountPoint{} + if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil { + // Clear all of our mount information. If another process + // unmounted something, it (along with its zero count) won't + // have been encoded into the version of mountpoints.json that + // we're loading, so our count could fall out of sync with it + // if we don't, and if we subsequently change something else, + // we'd pass that error along to other process that reloaded + // the data after we saved it. + for _, layer := range r.layers { + layer.MountPoint = "" + layer.MountCount = 0 + } + // All of the non-zero count values will have been encoded, so + // we reset the still-mounted ones based on the contents. + for _, mount := range layerMounts { + if mount.MountPoint != "" { + if layer, ok := r.lookup(mount.ID); ok { + mounts[mount.MountPoint] = layer + layer.MountPoint = mount.MountPoint + layer.MountCount = mount.MountCount + } + } + } + err = nil + } + r.bymount = mounts + return err +} + +func (r *layerStore) Save() error { + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + defer r.mountsLockfile.Touch() + if err := r.saveLayers(); err != nil { + return err + } + return r.saveMounts() +} + +func (r *layerStore) saveLayers() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) + } + if !r.Locked() { + return errors.New("layer store is not locked for writing") + } + rpath := r.layerspath() + if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { + return err + } + jldata, err := json.Marshal(&r.layers) + if err != nil { + return err + } + defer r.Touch() + return ioutils.AtomicWriteFile(rpath, jldata, 0600) +} + +func (r *layerStore) saveMounts() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) + } + if !r.mountsLockfile.Locked() { + return errors.New("layer store mount information is not locked for writing") + } + mpath := r.mountspath() + if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil { + return err + } + mounts := make([]layerMountPoint, 0, len(r.layers)) + for _, layer := range r.layers { + if layer.MountPoint != "" && layer.MountCount > 0 { + mounts = append(mounts, layerMountPoint{ + ID: layer.ID, + MountPoint: layer.MountPoint, + MountCount: layer.MountCount, + }) + } + } + jmdata, err := json.Marshal(&mounts) + if err != nil { + return err + } + if err 
= ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil { + return err + } + return r.loadMounts() +} + +func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver) (LayerStore, error) { + if err := os.MkdirAll(rundir, 0700); err != nil { + return nil, err + } + if err := os.MkdirAll(layerdir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock")) + if err != nil { + return nil, err + } + mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock")) + if err != nil { + return nil, err + } + rlstore := layerStore{ + lockfile: lockfile, + mountsLockfile: mountsLockfile, + driver: driver, + rundir: rundir, + layerdir: layerdir, + byid: make(map[string]*Layer), + bymount: make(map[string]*Layer), + byname: make(map[string]*Layer), + uidMap: copyIDMap(s.uidMap), + gidMap: copyIDMap(s.gidMap), + } + if err := rlstore.Load(); err != nil { + return nil, err + } + return &rlstore, nil +} + +func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) { + lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock")) + if err != nil { + return nil, err + } + rlstore := layerStore{ + lockfile: lockfile, + mountsLockfile: nil, + driver: driver, + rundir: rundir, + layerdir: layerdir, + byid: make(map[string]*Layer), + bymount: make(map[string]*Layer), + byname: make(map[string]*Layer), + } + if err := rlstore.Load(); err != nil { + return nil, err + } + return &rlstore, nil +} + +func (r *layerStore) lookup(id string) (*Layer, bool) { + if layer, ok := r.byid[id]; ok { + return layer, ok + } else if layer, ok := r.byname[id]; ok { + return layer, ok + } else if longid, err := r.idindex.Get(id); err == nil { + layer, ok := r.byid[longid] + return layer, ok + } + return nil, false +} + +func (r *layerStore) Size(name string) (int64, error) { + layer, ok := r.lookup(name) + if !ok { + return -1, ErrLayerUnknown + } + // We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that + // a zero value is not just present because it was never set to anything else (which can happen if the layer was + // created by a version of this library that didn't keep track of digest and size information). 
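+ // In other words, a -1 result with a nil error below means "size unknown", + // not "empty layer".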
+ if layer.UncompressedDigest != "" { + return layer.UncompressedSize, nil + } + return -1, nil +} + +func (r *layerStore) ClearFlag(id string, flag string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath()) + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + delete(layer.Flags, flag) + return r.Save() +} + +func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath()) + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + layer.Flags[flag] = value + return r.Save() +} + +func (r *layerStore) Status() ([][2]string, error) { + return r.driver.Status(), nil +} + +func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) { + if duplicateLayer, idInUse := r.byid[id]; idInUse { + return duplicateLayer, ErrDuplicateID + } + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, ErrDuplicateName + } + } + + parent := "" + if parentLayer != nil { + parent = parentLayer.ID + } + + info, err := aLayer.Info() + if err != nil { + return nil, err + } + defer info.Close() + layer = &Layer{} + if err := json.NewDecoder(info).Decode(layer); err != nil { + return nil, err + } + layer.ID = id + layer.Parent = parent + layer.Created = time.Now().UTC() + + if err := aLayer.CreateAs(id, parent); err != nil { + return nil, err + } + + // TODO: check if necessary fields are filled + r.layers = append(r.layers, layer) + r.idindex.Add(id) + r.byid[id] = layer + for _, name := range names { // names got from the additional layer store won't be used + r.byname[name] = layer + } + if layer.CompressedDigest != "" { + r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID) + } + if layer.UncompressedDigest != "" { + r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) + } + if err := r.Save(); err != nil { + r.driver.Remove(id) + return nil, err + } + return copyLayer(layer), nil +} + +func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) { + if !r.IsReadWrite() { + return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath()) + } + if err := os.MkdirAll(r.rundir, 0700); err != nil { + return nil, -1, err + } + if err := os.MkdirAll(r.layerdir, 0700); err != nil { + return nil, -1, err + } + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if duplicateLayer, idInUse := r.byid[id]; idInUse { + return duplicateLayer, -1, ErrDuplicateID + } + names = dedupeNames(names) + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, -1, ErrDuplicateName + } + } + parent := "" + if parentLayer != nil { + parent = parentLayer.ID + } + var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings + var ( + templateMetadata string + templateCompressedDigest digest.Digest + 
templateCompressedSize int64 + templateUncompressedDigest digest.Digest + templateUncompressedSize int64 + templateCompressionType archive.Compression + templateUIDs, templateGIDs []uint32 + templateTSdata []byte + ) + if moreOptions.TemplateLayer != "" { + var tserr error + templateLayer, ok := r.lookup(moreOptions.TemplateLayer) + if !ok { + return nil, -1, ErrLayerUnknown + } + templateMetadata = templateLayer.Metadata + templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap) + templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize + templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize + templateCompressionType = templateLayer.CompressionType + templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...) + templateTSdata, tserr = ioutil.ReadFile(r.tspath(templateLayer.ID)) + if tserr != nil && !os.IsNotExist(tserr) { + return nil, -1, tserr + } + } else { + templateIDMappings = &idtools.IDMappings{} + } + if parentLayer != nil { + parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap) + } else { + parentMappings = &idtools.IDMappings{} + } + if mountLabel != "" { + label.ReserveLabel(mountLabel) + } + + // Before actually creating the layer, make a persistent record of it with incompleteFlag, + // so that future processes have a chance to delete it. + layer := &Layer{ + ID: id, + Parent: parent, + Names: names, + MountLabel: mountLabel, + Metadata: templateMetadata, + Created: time.Now().UTC(), + CompressedDigest: templateCompressedDigest, + CompressedSize: templateCompressedSize, + UncompressedDigest: templateUncompressedDigest, + UncompressedSize: templateUncompressedSize, + CompressionType: templateCompressionType, + UIDs: templateUIDs, + GIDs: templateGIDs, + Flags: make(map[string]interface{}), + UIDMap: copyIDMap(moreOptions.UIDMap), + GIDMap: copyIDMap(moreOptions.GIDMap), + BigDataNames: []string{}, + } + r.layers = append(r.layers, layer) + r.idindex.Add(id) + r.byid[id] = layer + for _, name := range names { + r.byname[name] = layer + } + for flag, value := range flags { + layer.Flags[flag] = value + } + layer.Flags[incompleteFlag] = true + + succeeded := false + cleanupFailureContext := "" + defer func() { + if !succeeded { + // On any error, try both removing the driver's data as well + // as the in-memory layer record. 
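+ // cleanupFailureContext is set before every error return below so that, + // if this cleanup fails as well, the log line can name the step that + // failed first.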
+ if err2 := r.Delete(layer.ID); err2 != nil { + if cleanupFailureContext == "" { + cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site" + } + logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, layer.ID, err2) + } + } + }() + + err := r.Save() + if err != nil { + cleanupFailureContext = "saving incomplete layer metadata" + return nil, -1, err + } + + idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap) + opts := drivers.CreateOpts{ + MountLabel: mountLabel, + StorageOpt: options, + IDMappings: idMappings, + } + if moreOptions.TemplateLayer != "" { + if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { + cleanupFailureContext = "creating a layer from template" + return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id) + } + oldMappings = templateIDMappings + } else { + if writeable { + if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil { + cleanupFailureContext = "creating a read-write layer" + return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id) + } + } else { + if err := r.driver.Create(id, parent, &opts); err != nil { + cleanupFailureContext = "creating a read-only layer" + return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id) + } + } + oldMappings = parentMappings + } + if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) { + if err := r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { + cleanupFailureContext = "in UpdateLayerIDMap" + return nil, -1, err + } + } + if len(templateTSdata) > 0 { + if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil { + cleanupFailureContext = "creating tar-split parent directory for a copy from template" + return nil, -1, err + } + if err := ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil { + cleanupFailureContext = "creating a tar-split copy from template" + return nil, -1, err + } + } + + var size int64 = -1 + if diff != nil { + size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff) + if err != nil { + cleanupFailureContext = "applying layer diff" + return nil, -1, err + } + } else { + // applyDiffWithOptions in the `diff != nil` case handles this bit for us + if layer.CompressedDigest != "" { + r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID) + } + if layer.UncompressedDigest != "" { + r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) + } + } + delete(layer.Flags, incompleteFlag) + err = r.Save() + if err != nil { + cleanupFailureContext = "saving finished layer metadata" + return nil, -1, err + } + + layer = copyLayer(layer) + succeeded = true + return layer, size, err +} + +func (r *layerStore) CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) { + layer, _, err = r.Put(id, parent, names, mountLabel, options, moreOptions, writeable, flags, nil) + return layer, err +} + +func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions 
*LayerOptions, writeable bool) (layer *Layer, err error) { + return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil) +} + +func (r *layerStore) Mounted(id string) (int, error) { + if !r.IsReadWrite() { + return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath()) + } + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return 0, err + } + } + layer, ok := r.lookup(id) + if !ok { + return 0, ErrLayerUnknown + } + return layer.MountCount, nil +} + +func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) { + // check whether options include ro option + hasReadOnlyOpt := func(opts []string) bool { + for _, item := range opts { + if item == "ro" { + return true + } + } + return false + } + + // You are not allowed to mount layers from readonly stores if they + // are not mounted read/only. + if !r.IsReadWrite() && !hasReadOnlyOpt(options.Options) { + return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) + } + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return "", err + } + } + defer r.mountsLockfile.Touch() + layer, ok := r.lookup(id) + if !ok { + return "", ErrLayerUnknown + } + if layer.MountCount > 0 { + mounted, err := mount.Mounted(layer.MountPoint) + if err != nil { + return "", err + } + // If the container is not mounted then we have a condition + // where the kernel umounted the mount point. This means + // that the mount count never got decremented. 
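+ // Only bump the count if the mountpoint really is still mounted; + // otherwise fall through and ask the driver to mount the layer again.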
+ if mounted { + layer.MountCount++ + return layer.MountPoint, r.saveMounts() + } + } + if options.MountLabel == "" { + options.MountLabel = layer.MountLabel + } + + if (options.UidMaps != nil || options.GidMaps != nil) && !r.driver.SupportsShifting() { + if !reflect.DeepEqual(options.UidMaps, layer.UIDMap) || !reflect.DeepEqual(options.GidMaps, layer.GIDMap) { + return "", fmt.Errorf("cannot mount layer %v: shifting not enabled", layer.ID) + } + } + mountpoint, err := r.driver.Get(id, options) + if mountpoint != "" && err == nil { + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + layer.MountPoint = filepath.Clean(mountpoint) + layer.MountCount++ + r.bymount[layer.MountPoint] = layer + err = r.saveMounts() + } + return mountpoint, err +} + +func (r *layerStore) Unmount(id string, force bool) (bool, error) { + if !r.IsReadWrite() { + return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) + } + r.mountsLockfile.Lock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return false, err + } + } + defer r.mountsLockfile.Touch() + layer, ok := r.lookup(id) + if !ok { + layerByMount, ok := r.bymount[filepath.Clean(id)] + if !ok { + return false, ErrLayerUnknown + } + layer = layerByMount + } + if force { + layer.MountCount = 1 + } + if layer.MountCount > 1 { + layer.MountCount-- + return true, r.saveMounts() + } + err := r.driver.Put(id) + if err == nil || os.IsNotExist(err) { + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + layer.MountCount-- + layer.MountPoint = "" + return false, r.saveMounts() + } + return true, err +} + +func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { + if !r.IsReadWrite() { + return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath()) + } + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + if modified, err := r.mountsLockfile.Modified(); modified || err != nil { + if err = r.loadMounts(); err != nil { + return nil, nil, err + } + } + layer, ok := r.lookup(id) + if !ok { + return nil, nil, ErrLayerUnknown + } + if len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { + // We're not using any mappings, so there aren't any unmapped IDs on parent directories. + return nil, nil, nil + } + if layer.MountPoint == "" { + // We don't know which directories to examine. 
+ return nil, nil, ErrLayerNotMounted + } + rootuid, rootgid, err := idtools.GetRootUIDGID(layer.UIDMap, layer.GIDMap) + if err != nil { + return nil, nil, errors.Wrapf(err, "error reading root ID values for layer %q", layer.ID) + } + m := idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) + fsuids := make(map[int]struct{}) + fsgids := make(map[int]struct{}) + for dir := filepath.Dir(layer.MountPoint); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + st, err := system.Stat(dir) + if err != nil { + return nil, nil, errors.Wrap(err, "read directory ownership") + } + lst, err := system.Lstat(dir) + if err != nil { + return nil, nil, err + } + fsuid := int(st.UID()) + fsgid := int(st.GID()) + if _, _, err := m.ToContainer(idtools.IDPair{UID: fsuid, GID: rootgid}); err != nil { + fsuids[fsuid] = struct{}{} + } + if _, _, err := m.ToContainer(idtools.IDPair{UID: rootuid, GID: fsgid}); err != nil { + fsgids[fsgid] = struct{}{} + } + fsuid = int(lst.UID()) + fsgid = int(lst.GID()) + if _, _, err := m.ToContainer(idtools.IDPair{UID: fsuid, GID: rootgid}); err != nil { + fsuids[fsuid] = struct{}{} + } + if _, _, err := m.ToContainer(idtools.IDPair{UID: rootuid, GID: fsgid}); err != nil { + fsgids[fsgid] = struct{}{} + } + } + for uid := range fsuids { + uids = append(uids, uid) + } + for gid := range fsgids { + gids = append(gids, gid) + } + if len(uids) > 1 { + sort.Ints(uids) + } + if len(gids) > 1 { + sort.Ints(gids) + } + return uids, gids, nil +} + +func (r *layerStore) removeName(layer *Layer, name string) { + layer.Names = stringSliceWithoutValue(layer.Names, name) +} + +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. +func (r *layerStore) SetNames(id string, names []string) error { + return r.updateNames(id, names, setNames) +} + +func (r *layerStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *layerStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath()) + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + oldNames := layer.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherLayer, ok := r.byname[name]; ok { + r.removeName(otherLayer, name) + } + r.byname[name] = layer + } + layer.Names = names + return r.Save() +} + +func (r *layerStore) datadir(id string) string { + return filepath.Join(r.layerdir, id) +} + +func (r *layerStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) { + if key == "" { + return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve layer big data value for empty name") + } + layer, ok := r.lookup(id) + if !ok { + return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id) + } + return os.Open(r.datapath(layer.ID, key)) +} + +func (r *layerStore) SetBigData(id, key string, data io.Reader) error { + if key == "" { + return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for layer big data item") + } + if !r.IsReadWrite() { + return 
errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with layers at %q", r.layerspath()) + } + layer, ok := r.lookup(id) + if !ok { + return errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q to write bigdata", id) + } + err := os.MkdirAll(r.datadir(layer.ID), 0700) + if err != nil { + return err + } + + // NewAtomicFileWriter doesn't overwrite/truncate the existing inode. + // BigData() relies on this behaviour when opening the file for read + // so that it is either accessing the old data or the new one. + writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600) + if err != nil { + return errors.Wrapf(err, "error opening bigdata file") + } + + if _, err := io.Copy(writer, data); err != nil { + writer.Close() + return errors.Wrapf(err, "error copying bigdata for the layer") + + } + if err := writer.Close(); err != nil { + return errors.Wrapf(err, "error closing bigdata file for the layer") + } + + addName := true + for _, name := range layer.BigDataNames { + if name == key { + addName = false + break + } + } + if addName { + layer.BigDataNames = append(layer.BigDataNames, key) + return r.Save() + } + return nil +} + +func (r *layerStore) BigDataNames(id string) ([]string, error) { + layer, ok := r.lookup(id) + if !ok { + return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q to retrieve bigdata names", id) + } + return copyStringSlice(layer.BigDataNames), nil +} + +func (r *layerStore) Metadata(id string) (string, error) { + if layer, ok := r.lookup(id); ok { + return layer.Metadata, nil + } + return "", ErrLayerUnknown +} + +func (r *layerStore) SetMetadata(id, metadata string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath()) + } + if layer, ok := r.lookup(id); ok { + layer.Metadata = metadata + return r.Save() + } + return ErrLayerUnknown +} + +func (r *layerStore) tspath(id string) string { + return filepath.Join(r.layerdir, id+tarSplitSuffix) +} + +// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true +func layerHasIncompleteFlag(layer *Layer) bool { + // layer.Flags[…] is defined to succeed and return ok == false if Flags == nil + if flagValue, ok := layer.Flags[incompleteFlag]; ok { + if b, ok := flagValue.(bool); ok && b { + return true + } + } + return false +} + +func (r *layerStore) deleteInternal(id string) error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + // Ensure that if we are interrupted, the layer will be cleaned up. + if !layerHasIncompleteFlag(layer) { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + layer.Flags[incompleteFlag] = true + if err := r.Save(); err != nil { + return err + } + } + // We never unset incompleteFlag; below, we remove the entire object from r.layers.
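+ // If deletion is interrupted partway through, the persisted flag keeps + // the layer eligible for the cleanup pass that Load() runs on writable + // stores.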
+ + id = layer.ID + err := r.driver.Remove(id) + if err != nil { + return err + } + + os.Remove(r.tspath(id)) + os.RemoveAll(r.datadir(id)) + delete(r.byid, id) + for _, name := range layer.Names { + delete(r.byname, name) + } + r.idindex.Delete(id) + mountLabel := layer.MountLabel + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + r.deleteInDigestMap(id) + toDeleteIndex := -1 + for i, candidate := range r.layers { + if candidate.ID == id { + toDeleteIndex = i + break + } + } + if toDeleteIndex != -1 { + // delete the layer at toDeleteIndex + if toDeleteIndex == len(r.layers)-1 { + r.layers = r.layers[:len(r.layers)-1] + } else { + r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...) + } + } + if mountLabel != "" { + var found bool + for _, candidate := range r.layers { + if candidate.MountLabel == mountLabel { + found = true + break + } + } + if !found { + label.ReleaseLabel(mountLabel) + } + } + + return nil +} + +func (r *layerStore) deleteInDigestMap(id string) { + for digest, layers := range r.bycompressedsum { + for i, layerID := range layers { + if layerID == id { + layers = append(layers[:i], layers[i+1:]...) + r.bycompressedsum[digest] = layers + break + } + } + } + for digest, layers := range r.byuncompressedsum { + for i, layerID := range layers { + if layerID == id { + layers = append(layers[:i], layers[i+1:]...) + r.byuncompressedsum[digest] = layers + break + } + } + } +} + +func (r *layerStore) Delete(id string) error { + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + id = layer.ID + // The layer may already have been explicitly unmounted, but if not, we + // should try to clean that up before we start deleting anything at the + // driver level. + mountCount, err := r.Mounted(id) + if err != nil { + return errors.Wrapf(err, "error checking if layer %q is still mounted", id) + } + for mountCount > 0 { + if _, err := r.Unmount(id, false); err != nil { + return err + } + mountCount, err = r.Mounted(id) + if err != nil { + return errors.Wrapf(err, "error checking if layer %q is still mounted", id) + } + } + if err := r.deleteInternal(id); err != nil { + return err + } + return r.Save() +} + +func (r *layerStore) Lookup(name string) (id string, err error) { + if layer, ok := r.lookup(name); ok { + return layer.ID, nil + } + return "", ErrLayerUnknown +} + +func (r *layerStore) Exists(id string) bool { + _, ok := r.lookup(id) + return ok +} + +func (r *layerStore) Get(id string) (*Layer, error) { + if layer, ok := r.lookup(id); ok { + return copyLayer(layer), nil + } + return nil, ErrLayerUnknown +} + +func (r *layerStore) Wipe() error { + if !r.IsReadWrite() { + return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) + } + ids := make([]string, 0, len(r.byid)) + for id := range r.byid { + ids = append(ids, id) + } + for _, id := range ids { + if err := r.Delete(id); err != nil { + return err + } + } + return nil +} + +func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, fromLayer, toLayer *Layer, err error) { + var ok bool + toLayer, ok = r.lookup(to) + if !ok { + return "", "", nil, nil, ErrLayerUnknown + } + to = toLayer.ID + if from == "" { + from = toLayer.Parent + } + if from != "" { + fromLayer, ok = r.lookup(from) + if ok { + from = fromLayer.ID + } else { + fromLayer, ok = r.lookup(toLayer.Parent) + if ok { + from = fromLayer.ID + } + } + } + return from, to, fromLayer, toLayer, nil +} + +func (r *layerStore) layerMappings(layer *Layer) 
*idtools.IDMappings { + if layer == nil { + return &idtools.IDMappings{} + } + return idtools.NewIDMappingsFromMaps(layer.UIDMap, layer.GIDMap) +} + +func (r *layerStore) Changes(from, to string) ([]archive.Change, error) { + from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to) + if err != nil { + return nil, ErrLayerUnknown + } + return r.driver.Changes(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) +} + +type simpleGetCloser struct { + r *layerStore + path string + id string +} + +func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) { + return os.Open(filepath.Join(s.path, path)) +} + +func (s *simpleGetCloser) Close() error { + _, err := s.r.Unmount(s.id, false) + return err +} + +func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) { + if getter, ok := r.driver.(drivers.DiffGetterDriver); ok { + return getter.DiffGetter(id) + } + path, err := r.Mount(id, drivers.MountOpts{}) + if err != nil { + return nil, err + } + return &simpleGetCloser{ + r: r, + path: path, + id: id, + }, nil +} + +func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { + var metadata storage.Unpacker + + from, to, fromLayer, toLayer, err := r.findParentAndLayer(from, to) + if err != nil { + return nil, ErrLayerUnknown + } + // Default to applying the type of compression that we noted was used + // for the layerdiff when it was applied. + compression := toLayer.CompressionType + // If a particular compression type (or no compression) was selected, + // use that instead. + if options != nil && options.Compression != nil { + compression = *options.Compression + } + maybeCompressReadCloser := func(rc io.ReadCloser) (io.ReadCloser, error) { + // Depending on whether or not compression is desired, return either the + // passed-in ReadCloser, or a new one that provides its readers with a + // compressed version of the data that the original would have provided + // to its readers. + if compression == archive.Uncompressed { + return rc, nil + } + preader, pwriter := io.Pipe() + compressor, err := archive.CompressStream(pwriter, compression) + if err != nil { + rc.Close() + pwriter.Close() + preader.Close() + return nil, err + } + go func() { + defer pwriter.Close() + defer compressor.Close() + defer rc.Close() + io.Copy(compressor, rc) + }() + return preader, nil + } + + if from != toLayer.Parent { + diff, err := r.driver.Diff(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) + if err != nil { + return nil, err + } + return maybeCompressReadCloser(diff) + } + + if ad, ok := r.driver.(drivers.AdditionalLayerStoreDriver); ok { + if aLayer, err := ad.LookupAdditionalLayerByID(to); err == nil { + // This is an additional layer. We leverage blob API for acquiring the reproduced raw blob. + info, err := aLayer.Info() + if err != nil { + aLayer.Release() + return nil, err + } + defer info.Close() + layer := &Layer{} + if err := json.NewDecoder(info).Decode(layer); err != nil { + aLayer.Release() + return nil, err + } + blob, err := aLayer.Blob() + if err != nil { + aLayer.Release() + return nil, err + } + // If layer compression type is different from the expected one, decompress and convert it. 
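+ // That is: decompress the raw blob, then hand it to + // maybeCompressReadCloser so that it is re-compressed with whatever the + // caller requested.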
+ if compression != layer.CompressionType { + diff, err := archive.DecompressStream(blob) + if err != nil { + if err2 := blob.Close(); err2 != nil { + err = errors.Wrapf(err, "failed to close blob file: %v", err2) + } + aLayer.Release() + return nil, err + } + rc, err := maybeCompressReadCloser(diff) + if err != nil { + if err2 := closeAll(blob.Close, diff.Close); err2 != nil { + err = errors.Wrapf(err, "failed to cleanup: %v", err2) + } + aLayer.Release() + return nil, err + } + return ioutils.NewReadCloserWrapper(rc, func() error { + defer aLayer.Release() + return closeAll(blob.Close, rc.Close) + }), nil + } + return ioutils.NewReadCloserWrapper(blob, func() error { defer aLayer.Release(); return blob.Close() }), nil + } + } + + tsfile, err := os.Open(r.tspath(to)) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + diff, err := r.driver.Diff(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) + if err != nil { + return nil, err + } + return maybeCompressReadCloser(diff) + } + + decompressor, err := pgzip.NewReader(tsfile) + if err != nil { + if e := tsfile.Close(); e != nil { + logrus.Debug(e) + } + return nil, err + } + + metadata = storage.NewJSONUnpacker(decompressor) + + fgetter, err := r.newFileGetter(to) + if err != nil { + errs := multierror.Append(nil, errors.Wrapf(err, "creating file-getter")) + if err := decompressor.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor")) + } + if err := tsfile.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers")) + } + return nil, errs.ErrorOrNil() + } + + tarstream := asm.NewOutputTarStream(fgetter, metadata) + rc := ioutils.NewReadCloserWrapper(tarstream, func() error { + var errs *multierror.Error + if err := decompressor.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor")) + } + if err := tsfile.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers")) + } + if err := tarstream.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing reconstructed tarstream")) + } + if err := fgetter.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing file-getter")) + } + if errs != nil { + return errs.ErrorOrNil() + } + return nil + }) + return maybeCompressReadCloser(rc) +} + +func (r *layerStore) DiffSize(from, to string) (size int64, err error) { + var fromLayer, toLayer *Layer + from, to, fromLayer, toLayer, err = r.findParentAndLayer(from, to) + if err != nil { + return -1, ErrLayerUnknown + } + return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) +} + +func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) { + return r.applyDiffWithOptions(to, nil, diff) +} + +func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, diff io.Reader) (size int64, err error) { + if !r.IsReadWrite() { + return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath()) + } + + layer, ok := r.lookup(to) + if !ok { + return -1, ErrLayerUnknown + } + + header := make([]byte, 10240) + n, err := diff.Read(header) + if err != nil && err != io.EOF { + return -1, err + } + compression := archive.DetectCompression(header[:n]) + defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff) + + // Decide if we need to compute digests + var 
compressedDigest, uncompressedDigest digest.Digest // = "" + var compressedDigester, uncompressedDigester digest.Digester // = nil + if layerOptions != nil && layerOptions.OriginalDigest != "" && + layerOptions.OriginalDigest.Algorithm() == digest.Canonical { + compressedDigest = layerOptions.OriginalDigest + } else { + compressedDigester = digest.Canonical.Digester() + } + if layerOptions != nil && layerOptions.UncompressedDigest != "" && + layerOptions.UncompressedDigest.Algorithm() == digest.Canonical { + uncompressedDigest = layerOptions.UncompressedDigest + } else { + uncompressedDigester = digest.Canonical.Digester() + } + + var compressedWriter io.Writer + if compressedDigester != nil { + compressedWriter = compressedDigester.Hash() + } else { + compressedWriter = ioutil.Discard + } + compressedCounter := ioutils.NewWriteCounter(compressedWriter) + defragmented = io.TeeReader(defragmented, compressedCounter) + + tsdata := bytes.Buffer{} + compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) + if err != nil { + compressor = pgzip.NewWriter(&tsdata) + } + if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that + logrus.Infof("Error setting compression concurrency threads to 1: %v; ignoring", err) + } + metadata := storage.NewJSONPacker(compressor) + uncompressed, err := archive.DecompressStream(defragmented) + if err != nil { + return -1, err + } + defer uncompressed.Close() + uidLog := make(map[uint32]struct{}) + gidLog := make(map[uint32]struct{}) + idLogger, err := tarlog.NewLogger(func(h *tar.Header) { + if !strings.HasPrefix(path.Base(h.Name), archive.WhiteoutPrefix) { + uidLog[uint32(h.Uid)] = struct{}{} + gidLog[uint32(h.Gid)] = struct{}{} + } + }) + if err != nil { + return -1, err + } + defer idLogger.Close() + uncompressedCounter := ioutils.NewWriteCounter(idLogger) + uncompressedWriter := (io.Writer)(uncompressedCounter) + if uncompressedDigester != nil { + uncompressedWriter = io.MultiWriter(uncompressedWriter, uncompressedDigester.Hash()) + } + payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedWriter), metadata, storage.NewDiscardFilePutter()) + if err != nil { + return -1, err + } + options := drivers.ApplyDiffOpts{ + Diff: payload, + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + } + size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, options) + if err != nil { + return -1, err + } + compressor.Close() + if err == nil { + if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil { + return -1, err + } + if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil { + return -1, err + } + } + if compressedDigester != nil { + compressedDigest = compressedDigester.Digest() + } + if uncompressedDigester != nil { + uncompressedDigest = uncompressedDigester.Digest() + } + + updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) { + var newList []string + if oldvalue != "" { + for _, value := range (*m)[oldvalue] { + if value != id { + newList = append(newList, value) + } + } + if len(newList) > 0 { + (*m)[oldvalue] = newList + } else { + delete(*m, oldvalue) + } + } + if newvalue != "" { + (*m)[newvalue] = append((*m)[newvalue], id) + } + } + updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID) + layer.CompressedDigest = compressedDigest + layer.CompressedSize = compressedCounter.Count + 
updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID) + layer.UncompressedDigest = uncompressedDigest + layer.UncompressedSize = uncompressedCounter.Count + layer.CompressionType = compression + layer.UIDs = make([]uint32, 0, len(uidLog)) + for uid := range uidLog { + layer.UIDs = append(layer.UIDs, uid) + } + sort.Slice(layer.UIDs, func(i, j int) bool { + return layer.UIDs[i] < layer.UIDs[j] + }) + layer.GIDs = make([]uint32, 0, len(gidLog)) + for gid := range gidLog { + layer.GIDs = append(layer.GIDs, gid) + } + sort.Slice(layer.GIDs, func(i, j int) bool { + return layer.GIDs[i] < layer.GIDs[j] + }) + + err = r.Save() + + return size, err +} + +func (r *layerStore) DifferTarget(id string) (string, error) { + ddriver, ok := r.driver.(drivers.DriverWithDiffer) + if !ok { + return "", ErrNotSupported + } + layer, ok := r.lookup(id) + if !ok { + return "", ErrLayerUnknown + } + return ddriver.DifferTarget(layer.ID) +} + +func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { + ddriver, ok := r.driver.(drivers.DriverWithDiffer) + if !ok { + return ErrNotSupported + } + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + if options == nil { + options = &drivers.ApplyDiffOpts{ + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + } + } + err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options) + if err != nil { + return err + } + layer.UIDs = diffOutput.UIDs + layer.GIDs = diffOutput.GIDs + layer.UncompressedDigest = diffOutput.UncompressedDigest + layer.UncompressedSize = diffOutput.Size + layer.Metadata = diffOutput.Metadata + if err = r.Save(); err != nil { + return err + } + for k, v := range diffOutput.BigData { + if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil { + r.Delete(id) + return err + } + } + return err +} + +func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { + ddriver, ok := r.driver.(drivers.DriverWithDiffer) + if !ok { + return nil, ErrNotSupported + } + + if to == "" { + output, err := ddriver.ApplyDiffWithDiffer("", "", options, differ) + return &output, err + } + + layer, ok := r.lookup(to) + if !ok { + return nil, ErrLayerUnknown + } + if options == nil { + options = &drivers.ApplyDiffOpts{ + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + } + } + output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ) + if err != nil { + return nil, err + } + layer.UIDs = output.UIDs + layer.GIDs = output.GIDs + err = r.Save() + return &output, err +} + +func (r *layerStore) CleanupStagingDirectory(stagingDirectory string) error { + ddriver, ok := r.driver.(drivers.DriverWithDiffer) + if !ok { + return ErrNotSupported + } + return ddriver.CleanupStagingDirectory(stagingDirectory) +} + +func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) { + var layers []Layer + for _, layerID := range m[d] { + layer, ok := r.lookup(layerID) + if !ok { + return nil, ErrLayerUnknown + } + layers = append(layers, *copyLayer(layer)) + } + return layers, nil +} + +func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { + return r.layersByDigestMap(r.bycompressedsum, d) +} + +func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, 
error) { + return r.layersByDigestMap(r.byuncompressedsum, d) +} + +func (r *layerStore) Lock() { + r.lockfile.Lock() +} + +func (r *layerStore) RecursiveLock() { + r.lockfile.RecursiveLock() +} + +func (r *layerStore) RLock() { + r.lockfile.RLock() +} + +func (r *layerStore) Unlock() { + r.lockfile.Unlock() +} + +func (r *layerStore) Touch() error { + return r.lockfile.Touch() +} + +func (r *layerStore) Modified() (bool, error) { + var mmodified, tmodified bool + lmodified, err := r.lockfile.Modified() + if err != nil { + return lmodified, err + } + if r.IsReadWrite() { + r.mountsLockfile.RLock() + defer r.mountsLockfile.Unlock() + mmodified, err = r.mountsLockfile.Modified() + if err != nil { + return lmodified, err + } + } + + if lmodified || mmodified { + return true, nil + } + + // If the layers.json file has been modified manually, then we have to + // reload the storage in any case. + info, err := os.Stat(r.layerspath()) + if err != nil && !os.IsNotExist(err) { + return false, errors.Wrap(err, "stat layers file") + } + if info != nil { + tmodified = info.ModTime() != r.layerspathModified + r.layerspathModified = info.ModTime() + } + + return tmodified, nil +} + +func (r *layerStore) IsReadWrite() bool { + return r.lockfile.IsReadWrite() +} + +func (r *layerStore) TouchedSince(when time.Time) bool { + return r.lockfile.TouchedSince(when) +} + +func (r *layerStore) Locked() bool { + return r.lockfile.Locked() +} + +func (r *layerStore) ReloadIfChanged() error { + r.loadMut.Lock() + defer r.loadMut.Unlock() + + modified, err := r.Modified() + if err == nil && modified { + return r.Load() + } + return err +} + +func closeAll(closes ...func() error) (rErr error) { + for _, f := range closes { + if err := f(); err != nil { + if rErr == nil { + rErr = errors.Wrapf(err, "close error") + continue + } + rErr = errors.Wrapf(rErr, "%v", err) + } + } + return +} diff --git a/vendor/github.com/containers/storage/lockfile_compat.go b/vendor/github.com/containers/storage/lockfile_compat.go new file mode 100644 index 00000000000..6fac2ebac63 --- /dev/null +++ b/vendor/github.com/containers/storage/lockfile_compat.go @@ -0,0 +1,15 @@ +package storage + +import ( + "github.com/containers/storage/pkg/lockfile" +) + +type Locker = lockfile.Locker + +func GetLockfile(path string) (lockfile.Locker, error) { + return lockfile.GetLockfile(path) +} + +func GetROLockfile(path string) (lockfile.Locker, error) { + return lockfile.GetROLockfile(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/README.md b/vendor/github.com/containers/storage/pkg/archive/README.md new file mode 100644 index 00000000000..7307d9694f6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. 
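lockfile_compat.go above keeps the pre-existing storage.GetLockfile / storage.GetROLockfile entry points alive as thin aliases over pkg/lockfile, so existing callers of the storage package keep compiling against this vendored bump. A minimal sketch of driving that compat surface (the lock path is illustrative):

package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	// GetLockfile returns a process-shared lockfile.Locker for the given
	// path, creating the file if it does not exist yet.
	lock, err := storage.GetLockfile("/var/lib/containers/storage/example.lock")
	if err != nil {
		panic(err)
	}

	lock.Lock() // exclusive; RLock() would take it shared
	defer lock.Unlock()

	// Touch records that this process last wrote the protected data;
	// Modified() on another Locker for the same path would then report true.
	if err := lock.Touch(); err != nil {
		fmt.Println("touch:", err)
	}
}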
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go new file mode 100644 index 00000000000..d4f129ee634 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -0,0 +1,1521 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "fmt" + "io" + "io/fs" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "syscall" + + "github.com/containers/storage/pkg/fileutils" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/promise" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/pkg/unshare" + gzip "github.com/klauspost/pgzip" + "github.com/opencontainers/runc/libcontainer/userns" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/ulikunitz/xz" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + IgnoreChownErrors bool + ChownOpts *idtools.IDPair + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // This is additional data to be used by the converter. It will + // not survive a round trip through JSON, so it's primarily + // intended for generating archives (i.e., converting writes). + WhiteoutData interface{} + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + // CopyPass indicates that the contents of any archive we're creating + // will instantly be extracted and written to disk, so we can deviate + // from the traditional behavior/format to get features like subsecond + // precision in timestamps. + CopyPass bool + // ForceMask, if set, indicates the permission mask used for created files. + ForceMask *os.FileMode + } +) + +const ( + tarExt = "tar" + solaris = "solaris" + windows = "windows" + containersOverrideXattr = "user.containers.override_stat" +) + +var xattrsToIgnore = map[string]interface{}{ + "security.selinux": true, +} + +// Archiver allows the reuse of most utility functions of this package with a +// pluggable Untar function. To facilitate the passing of specific id mappings +// for untar, an archiver can be created with maps which will then be passed to +// Untar operations. If ChownOpts is set, its values are mapped using +// UntarIDMappings before being used to create files and directories on disk. 
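Before the Archiver type below, a quick illustration of how the TarOptions struct above is typically filled in by a caller packing a layer directory. Everything here (the source path, the exclude pattern) is hypothetical; TarWithOptions is defined later in this file:

package main

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Hypothetical: pack a directory as a gzipped tar, skipping temp files
	// and emitting overlay-style whiteouts for deleted entries.
	rc, err := archive.TarWithOptions("/tmp/layer-root", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{"tmp/*"},
		WhiteoutFormat:  archive.OverlayWhiteoutFormat,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	_, _ = io.Copy(os.Stdout, rc) // stream the archive wherever it needs to go
}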
+type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + TarIDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair + UntarIDMappings *idtools.IDMappings +} + +// NewDefaultArchiver returns a new Archiver without any IDMappings +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, TarIDMappings: &idtools.IDMappings{}, UntarIDMappings: &idtools.IDMappings{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +// overwriteError is used to differentiate errors related to attempting to +// overwrite a directory with a non-directory or vice-versa. When testing +// copying a file over a directory, this error is expected in order for the +// test to pass. +type overwriteError error + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz + // Zstd is zstd compression algorithm. + Zstd +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + defer rdr.Close() + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + Zstd: {0x28, 0xb5, 0x2f, 0xfd}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. 
+ // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, err := xz.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return readBufWrapper, nil + case Zstd: + return zstdReader(buf) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Zstd: + return zstdWriter(dest) + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. 
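DetectCompression above is purely magic-byte driven, so it can label a blob without trusting file extensions, and DecompressStream builds on it for transparent decompression. Before ReplaceFileTarWrapper's body below, a tiny self-contained check (the gzip magic is 1f 8b 08):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	gz := []byte{0x1F, 0x8B, 0x08, 0x00} // first bytes of any gzip stream
	c := archive.DetectCompression(gz)
	fmt.Println(c.Extension()) // "tar.gz"

	// Anything without a known magic prefix is reported as Uncompressed.
	u := archive.DetectCompression([]byte("plain tar or junk"))
	fmt.Println(u.Extension()) // "tar"
}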
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return tarExt + case Bzip2: + return tarExt + ".bz2" + case Gzip: + return tarExt + ".gz" + case Xz: + return tarExt + ".xz" + case Zstd: + return tarExt + ".zst" + } + return "" +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. 
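Note that ReplaceFileTarWrapper above also applies any modifier whose key never matched an entry (with a nil header and reader), which makes it usable for injecting files as well as rewriting them. A hedged sketch of that usage, where in.tar and etc/motd are made-up names; FileInfoHeader itself follows below:

package main

import (
	"archive/tar"
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	src, err := os.Open("in.tar") // hypothetical input archive
	if err != nil {
		panic(err)
	}
	// ReplaceFileTarWrapper closes src when the rewritten stream is drained.

	mods := map[string]archive.TarModifierFunc{
		// Replace etc/motd if present; append it if the archive lacks one.
		"etc/motd": func(path string, hdr *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
			if hdr == nil { // file was absent from the archive
				hdr = &tar.Header{Typeflag: tar.TypeReg, Mode: 0o644}
			}
			return hdr, []byte("rewritten\n"), nil
		},
	}

	out := archive.ReplaceFileTarWrapper(src, mods)
	defer out.Close()
	_, _ = io.Copy(io.Discard, out) // consume the rewritten stream
}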
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability, security,image +// xattrs from filesystem to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + for _, xattr := range []string{"security.capability", "security.ima"} { + capability, err := system.Lgetxattr(path, xattr) + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + return errors.Wrapf(err, "failed to read %q attribute from %q", xattr, path) + } + if capability != nil { + hdr.Xattrs[xattr] = string(capability) + } + } + return nil +} + +// ReadUserXattrToTarHeader reads user.* xattr from filesystem to a tar header +func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error { + xattrs, err := system.Llistxattr(path) + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + return err + } + for _, key := range xattrs { + if strings.HasPrefix(key, "user.") { + value, err := system.Lgetxattr(path, key) + if err != nil { + if errors.Is(err, system.E2BIG) { + logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", path, key) + continue + } + return err + } + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[key] = string(value) + } + } + return nil +} + +type TarWhiteoutHandler interface { + Setxattr(path, name string, value []byte) error + Mknod(path string, mode uint32, dev int) error + Chown(path string, uid, gid int) error +} + +type TarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) + ConvertReadWithHandler(*tar.Header, string, TarWhiteoutHandler) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. 
+ WhiteoutConverter TarWhiteoutConverter + // CopyPass indicates that the contents of any archive we're creating + // will instantly be extracted and written to disk, so we can deviate + // from the traditional behavior/format to get features like subsecond + // precision in timestamps. + CopyPass bool +} + +func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + ChownOpts: chownOpts, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. +func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + if fi.Mode()&os.ModeSocket != 0 { + logrus.Warnf("archive: skipping %q since it is a socket", path) + return nil + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + if err := ReadUserXattrToTarHeader(path, hdr); err != nil { + return err + } + if ta.CopyPass { + copyPassHeader(hdr) + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files. We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) + if err != nil { + return err + } + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID + } + + maybeTruncateHeaderModTime(hdr) + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. 
Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + file, err := os.Open(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns, ignoreChownErrors bool, forceMask *os.FileMode, buffer []byte) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + mask := hdrInfo.Mode() + if forceMask != nil { + mask = *forceMask + } + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, mask); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + // On Linux, this equates to a regular os.OpenFile + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, mask) + if err != nil { + return err + } + if _, err := io.CopyBuffer(file, reader, buffer); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + logrus.Debugf("Tar: Can't create device %v while running in user namespace", path) + return nil + } + fallthrough + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. 
/extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) + } + + if forceMask != nil && hdr.Typeflag != tar.TypeSymlink { + value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777) + if err := system.Lsetxattr(path, containersOverrideXattr, []byte(value), 0); err != nil { + return err + } + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != windows { + if chownOpts == nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID) + if err != nil { + if ignoreChownErrors { + fmt.Fprintf(os.Stderr, "Chown error detected. Ignoring due to ignoreChownErrors flag: %v\n", err) + } else { + return err + } + } + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo, forceMask); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + + var errs []string + for key, value := range hdr.Xattrs { + if _, found := xattrsToIgnore[key]; found { + continue + } + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. We also ignore EPERM errors if we are running in a + // user namespace. + errs = append(errs, err.Error()) + continue + } + return err + } + + } + + if len(errs) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errs, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. 
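The hardlink and symlink cases in createTarFile above both rely on the same containment rule: join the link target under the extraction root and refuse anything that escapes it. In isolation the idea looks like this (a simplified sketch with an explicit separator guard, not the package's exported API); the Tar entry point follows below:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// insideRoot reports whether linkname, resolved under root, stays under root.
// This mirrors the breakoutError checks in createTarFile.
func insideRoot(root, linkname string) bool {
	target := filepath.Join(root, linkname)
	return strings.HasPrefix(target, filepath.Clean(root)+string(filepath.Separator))
}

func main() {
	fmt.Println(insideRoot("/extract", "usr/bin/sh"))       // true
	fmt.Println(insideRoot("/extract", "../../etc/passwd")) // false: would break out
}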
+func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + ta.CopyPass = options.CopyPass + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. 
IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + matches, err := pm.IsMatch(relFilePath) + if err != nil { + logrus.Errorf("Matching %s: %v", relFilePath, err) + return err + } + skip = matches + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !d.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + buffer := make([]byte, 1<<20) + + doChown := !options.NoLchown + if options.ForceMask != nil { + // if ForceMask is in place, make sure lchown is disabled. + doChown = false + uid, gid, mode, err := GetFileOwner(dest) + if err == nil { + value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) + if err := system.Lsetxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil { + return err + } + } + } + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. 
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return overwriteError(fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return overwriteError(fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)) + } + + if fi.IsDir() && hdr.Name == "." { + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + chownOpts := options.ChownOpts + if err := remapIDs(nil, idMappings, chownOpts, hdr); err != nil { + return err + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if chownOpts != nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + + if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. 
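Unpack above does the real extraction work; Untar wraps it with compression sniffing, and UntarUncompressed (below) skips that step. A minimal caller sketch, with made-up paths:

package main

import (
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	f, err := os.Open("layer.tar.gz") // hypothetical compressed layer
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Compression is auto-detected; a nil options value is defaulted
	// inside untarHandler.
	if err := archive.Untar(f, "/tmp/rootfs", nil); err != nil {
		panic(err)
	}
}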
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + tarMappings := archiver.TarIDMappings + if tarMappings == nil { + tarMappings = &idtools.IDMappings{} + } + options := &TarOptions{ + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + Compression: Uncompressed, + CopyPass: true, + InUserNS: userns.RunningInUserNS(), + } + archive, err := TarWithOptions(src, options) + if err != nil { + return err + } + defer archive.Close() + untarMappings := archiver.UntarIDMappings + if untarMappings == nil { + untarMappings = &idtools.IDMappings{} + } + options = &TarOptions{ + UIDMaps: untarMappings.UIDs(), + GIDMaps: untarMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: userns.RunningInUserNS(), + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + untarMappings := archiver.UntarIDMappings + if untarMappings == nil { + untarMappings = &idtools.IDMappings{} + } + options := &TarOptions{ + UIDMaps: untarMappings.UIDs(), + GIDMaps: untarMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: userns.RunningInUserNS(), + } + return archiver.Untar(archive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.UntarIDMappings.RootPair() + if archiver.ChownOpts != nil { + rootIDs = *archiver.ChownOpts + } + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. 
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil { + return err + } + + r, w := io.Pipe() + errC := promise.Go(func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + copyPassHeader(hdr) + + if err := remapIDs(archiver.TarIDMappings, nil, archiver.ChownOpts, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }) + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + options := &TarOptions{ + UIDMaps: archiver.UntarIDMappings.UIDs(), + GIDMaps: archiver.UntarIDMappings.GIDs(), + ChownOpts: archiver.ChownOpts, + InUserNS: userns.RunningInUserNS(), + NoOverwriteDirNonDir: true, + } + err = archiver.Untar(r, filepath.Dir(dst), options) + if err != nil { + r.CloseWithError(err) + } + return err +} + +func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, hdr *tar.Header) (err error) { + var uid, gid int + if chownOpts != nil { + uid, gid = chownOpts.UID, chownOpts.GID + } else { + if readIDMappings != nil && !readIDMappings.Empty() { + uid, gid, err = readIDMappings.ToContainer(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + if err != nil { + return err + } + } else { + uid, gid = hdr.Uid, hdr.Gid + } + } + ids := idtools.IDPair{UID: uid, GID: gid} + if writeIDMappings != nil && !writeIDMappings.Empty() { + ids, err = writeIDMappings.ToHost(ids) + if err != nil { + return err + } + } + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. 
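Together with the Close method below, TempArchive.Read deletes the backing file as soon as the last byte has been read, so the archive is strictly read-once. A sketch of that contract (the input bytes are placeholder content, not a real tar):

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Spool some bytes to a temp file; "" selects the default temp dir.
	ta, err := archive.NewTempArchive(strings.NewReader("not really a tar"), "")
	if err != nil {
		panic(err)
	}

	// Draining the reader triggers the self-delete in TempArchive.Read.
	n, _ := io.Copy(io.Discard, ta)
	fmt.Println("read", n, "bytes; backing file is now gone")
}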
+func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return NewDefaultArchiver().UntarPath(src, dst) +} + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + +// NewArchiver returns a new Archiver +func NewArchiver(idMappings *idtools.IDMappings) *Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings} +} + +// NewArchiverWithChown returns a new Archiver which uses Untar and the provided ID mapping configuration on both ends +func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *Archiver { + if tarIDMappings == nil { + tarIDMappings = &idtools.IDMappings{} + } + if untarIDMappings == nil { + untarIDMappings = &idtools.IDMappings{} + } + return &Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings} +} + +// CopyFileWithTarAndChown returns a function which copies a single file from outside +// of any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + contentReader, contentWriter, err := os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating pipe extract data to %q", dest) + } + defer contentReader.Close() + defer contentWriter.Close() + var hashError error + var hashWorker sync.WaitGroup + hashWorker.Add(1) + go func() { + t := tar.NewReader(contentReader) + _, err := t.Next() + if err != nil { + hashError = err + } + if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { + hashError = err + } + hashWorker.Done() + }() + if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { + err = errors.Wrapf(err, "error extracting data to %q while copying", dest) + } + hashWorker.Wait() + if err == nil { + err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest) + } + return err + } + } + return archiver.CopyFileWithTar +} + +// CopyWithTarAndChown returns a function which copies a directory tree from outside of +// any container into our working container, mapping permissions using the +// container's ID maps, 
possibly overridden using the passed-in chownOpts +func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.CopyWithTar +} + +// UntarPathAndChown returns a function which extracts an archive in a specified +// location into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.UntarPath +} + +// TarPath returns a function which creates an archive of a specified +// location in the container's filesystem, mapping permissions using the +// container's ID maps +func TarPath(uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(path string) (io.ReadCloser, error) { + tarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + return func(path string) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{ + Compression: Uncompressed, + UIDMaps: tarMappings.UIDs(), + GIDMaps: tarMappings.GIDs(), + }) + } +} + +// GetOverlayXattrName returns the xattr used by the overlay driver with the +// given name. +// It uses the trusted.overlay prefix when running as root, and user.overlay +// in rootless mode. 
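The *AndChown helpers above all share one trick: wrap archiver.Untar so the raw tar stream is teed into a caller-supplied hasher while it is being extracted. A hedged sketch of computing a digest during a copy (the paths are made up; nil maps mean no ID remapping); GetOverlayXattrName itself follows below:

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	h := sha256.New()

	// copyFn extracts the source tree into the destination while h
	// observes the intermediate tar stream.
	copyFn := archive.CopyWithTarAndChown(nil, h, nil, nil)
	if err := copyFn("/tmp/src-tree", "/tmp/dest-tree"); err != nil {
		panic(err)
	}
	fmt.Printf("sha256 of the copy stream: %x\n", h.Sum(nil))
}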
+func GetOverlayXattrName(name string) string { + if unshare.IsRootless() { + return fmt.Sprintf("user.overlay.%s", name) + } + return fmt.Sprintf("trusted.overlay.%s", name) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go new file mode 100644 index 00000000000..7bc44a5665e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go @@ -0,0 +1,22 @@ +// +build go1.10 + +package archive + +import ( + "archive/tar" + "time" +) + +func copyPassHeader(hdr *tar.Header) { + hdr.Format = tar.FormatPAX +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { + if hdr.Format == tar.FormatUnknown { + // one of the first things archive/tar does is round this + // value, possibly up, if the format isn't specified, while we + // are much better equipped to handle truncation when scanning + // for changes between source and an extracted copy of this + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + } +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go new file mode 100644 index 00000000000..d19811fdbca --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go @@ -0,0 +1,13 @@ +// +build !go1.10 + +package archive + +import ( + "archive/tar" +) + +func copyPassHeader(hdr *tar.Header) { +} + +func maybeTruncateHeaderModTime(hdr *tar.Header) { +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go b/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go new file mode 100644 index 00000000000..7c307ffcfe5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go @@ -0,0 +1,125 @@ +// +build freebsd + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "github.com/opencontainers/runc/libcontainer/userns" + "golang.org/x/sys/unix" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
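The archive_110.go / archive_19.go pair above is a build-tag switch: on go1.10 and later, CopyPass sets tar.FormatPAX so that sub-second timestamps survive the round trip, while maybeTruncateHeaderModTime pre-truncates when the format is left unknown. A standalone illustration of the PAX half (stdlib only; the FreeBSD chmodTarEntry below is unrelated plumbing):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"time"
)

// roundTrip writes one header with the given format and reads it back.
func roundTrip(format tar.Format) time.Time {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	_ = tw.WriteHeader(&tar.Header{
		Name:     "f",
		Typeflag: tar.TypeReg,
		Mode:     0o644,
		ModTime:  time.Unix(1700000000, 123456789),
		Format:   format,
	})
	_ = tw.Close()
	hdr, _ := tar.NewReader(&buf).Next()
	return hdr.ModTime
}

func main() {
	// PAX records carry fractional seconds, so the nanoseconds survive.
	fmt.Println(roundTrip(tar.FormatPAX).Nanosecond()) // 123456789
}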
+func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if userns.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, uint64(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go new file mode 100644 index 00000000000..51fbd9a2197 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go @@ -0,0 +1,191 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func getOverlayOpaqueXattrName() string { + return GetOverlayXattrName("opaque") +} + +func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + if rolayers, ok := data.([]string); ok && len(rolayers) > 0 { + return overlayWhiteoutConverter{rolayers: rolayers} + } + return overlayWhiteoutConverter{rolayers: nil} + } + return nil +} + +type overlayWhiteoutConverter struct { + rolayers []string +} + +func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file 
and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the whiteout prefix + opaque, err := system.Lgetxattr(path, getOverlayOpaqueXattrName()) + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, getOverlayOpaqueXattrName()) + } + // If there are no lower layers, then it can't have been deleted in this layer. + if len(o.rolayers) == 0 { + return nil, nil + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, rolayer := range o.rolayers { + stat, statErr := os.Stat(filepath.Join(rolayer, hdr.Name)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. + return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return nil, nil + } + } + // It's not whiteout, so it was there in the older layer, so we need to + // add a whiteout for this item in this layer. + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + break + } + for dir := filepath.Dir(hdr.Name); dir != "" && dir != "." && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory in a parent layer. + stat, statErr := os.Stat(filepath.Join(rolayer, dir)) + if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) { + // Not sure what happened here. + return nil, statErr + } + if statErr == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + // If it's whiteout for a parent directory, then the + // original directory wasn't inherited into this layer, + // so we don't need to emit whiteout for it. + if isWhiteOut(stat) { + return nil, nil + } + } + } + } + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path string, handler TarWhiteoutHandler) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := handler.Setxattr(dir, getOverlayOpaqueXattrName(), []byte{'y'}) + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + // If someone does: + // rm -rf /foo/bar + // in an image, some tools will generate a layer with: + // /.wh.foo + // /foo/.wh.bar + // and when doing the second mknod(), we will fail with + // ENOTDIR, since the previous /foo was mknod()'d as a + // character device node and not a directory. 
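+			// Illustrative summary of the two whiteout layouts involved
+			// (editor's note, not upstream text):
+			//
+			//	AUFS (in tar):     .wh.bar           deleted file "bar"
+			//	                   dir/.wh..wh..opq  opaque directory "dir"
+			//	overlay (on disk): mknod bar c 0 0   deleted file "bar"
+			//	                   opaque xattr (e.g. trusted.overlay.opaque) = "y" on "dir"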
+ if isENOTDIR(err) { + return false, nil + } + return false, err + } + if err := handler.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} + +type directHandler struct { +} + +func (d directHandler) Setxattr(path, name string, value []byte) error { + return unix.Setxattr(path, name, value, 0) +} + +func (d directHandler) Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +func (d directHandler) Chown(path string, uid, gid int) error { + return idtools.SafeChown(path, uid, gid) +} + +func (o overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + var handler directHandler + return o.ConvertReadWithHandler(hdr, path, handler) +} + +func isWhiteOut(stat os.FileInfo) bool { + s := stat.Sys().(*syscall.Stat_t) + return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 +} + +func GetFileOwner(path string) (uint32, uint32, uint32, error) { + f, err := os.Stat(path) + if err != nil { + return 0, 0, 0, err + } + s, ok := f.Sys().(*syscall.Stat_t) + if ok { + return s.Uid, s.Gid, s.Mode & 07777, nil + } + return 0, 0, uint32(f.Mode()), nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go new file mode 100644 index 00000000000..4b8834444f0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_other.go @@ -0,0 +1,11 @@ +// +build !linux + +package archive + +func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter { + return nil +} + +func GetFileOwner(path string) (uint32, uint32, uint32, error) { + return 0, 0, 0, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go new file mode 100644 index 00000000000..7c3e442dad9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -0,0 +1,120 @@ +// +build !windows,!freebsd + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
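+// On unix this is the identity; contrast the Windows variant later in this
+// diff, which forces the executable bit and clamps to 0755. Sketch (editor's
+// example, not upstream text):
+//
+//	chmodTarEntry(0644) // unix: 0644, windows: 0755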
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + permissionsMask := hdrInfo.Mode() + if forceMask != nil { + permissionsMask = *forceMask + } + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, permissionsMask); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go new file mode 100644 index 00000000000..a0872444f32 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go @@ -0,0 +1,79 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. 
Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Inode in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error { + return nil +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + // no notion of file ownership mapping yet on Windows + return idtools.IDPair{0, 0}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go b/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go new file mode 100644 index 00000000000..36b7118aa85 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_zstd.go @@ -0,0 +1,41 @@ +package archive + +import ( + "io" + + "github.com/klauspost/compress/zstd" +) + +type wrapperZstdDecoder struct { + decoder *zstd.Decoder +} + +func (w *wrapperZstdDecoder) Close() error { + w.decoder.Close() + return nil +} + +func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { + return w.decoder.DecodeAll(input, dst) +} + +func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { + return w.decoder.Read(p) +} + +func (w *wrapperZstdDecoder) Reset(r io.Reader) error { + return w.decoder.Reset(r) +} + +func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { + return w.decoder.WriteTo(wr) +} + +func zstdReader(buf io.Reader) (io.ReadCloser, error) { + decoder, err := zstd.NewReader(buf) + return &wrapperZstdDecoder{decoder: decoder}, err +} + +func zstdWriter(dest io.Writer) (io.WriteCloser, error) { + return zstd.NewWriter(dest) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go new file mode 100644 index 00000000000..c7bb25d0f17 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -0,0 +1,500 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "syscall" + "time" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. 
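+	// (For example, editing /etc/hosts in a layer yields
+	// {Kind: ChangeModify, Path: "/etc/hosts"}, which Change.String
+	// below renders as "C /etc/hosts"; path illustrative.)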
+ ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip, aufsWhiteoutPresent) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +func aufsWhiteoutPresent(root, path string) (bool, error) { + f := filepath.Join(filepath.Dir(path), WhiteoutPrefix+filepath.Base(path)) + _, err := os.Stat(filepath.Join(root, f)) + if err == nil { + return true, nil + } + if os.IsNotExist(err) || isENOTDIR(err) { + return false, nil + } + return false, err +} + +func isENOTDIR(err error) bool { + if err == nil { + return false + } + if err == syscall.ENOTDIR { + return true + } + if perror, ok := err.(*os.PathError); ok { + if errno, ok := perror.Err.(syscall.Errno); ok { + return errno == syscall.ENOTDIR + } + } + return false +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) +type whiteoutChange func(string, string) (bool, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whiteoutChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. 
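+		// e.g. a walked entry "a/b" becomes "/a/b" on Linux and `\a\b`
+		// on Windows before being compared against the parent layers.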
+ path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + layerScan: + for _, layer := range layers { + if wc != nil { + // ...Unless a lower layer also had whiteout for this directory or one of its parents, + // in which case, it's new + ignore, err := wc(layer, path) + if err != nil { + return err + } + if ignore { + break layerScan + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + ignore, err = wc(layer, dir) + if err != nil { + return err + } + if ignore { + break layerScan + } + } + } + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + tail := []Change{} + for parent != "/" { + if _, ok := changedDirs[parent]; !ok && parent != "/" { + tail = append([]Change{{Path: parent, Kind: ChangeModify}}, tail...) + changedDirs[parent] = struct{}{} + } + parent = filepath.Dir(parent) + } + changes = append(changes, tail...) + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + idMappings *idtools.IDMappings + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool + xattrs map[string]string +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. 
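+		// The nameless root therefore renders as "/" (or `\` on Windows);
+		// descendant paths are assembled with filepath.Join below.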
+ return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, oldInfo, newStat, info) || + !bytes.Equal(oldChild.capability, newChild.capability) || + !reflect.DeepEqual(oldChild.xattrs, newChild.xattrs) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo(idMappings *idtools.IDMappings) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + idMappings: idMappings, + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. 
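+// Minimal usage sketch (editor's example; the paths and empty ID mappings
+// are assumptions, not taken from upstream):
+//
+//	m := &idtools.IDMappings{}
+//	changes, err := ChangesDirs("/tmp/new-layer", m, "/tmp/old-layer", m)
+//	if err == nil {
+//		for _, c := range changes {
+//			fmt.Println(c.String()) // e.g. "A /etc/motd"
+//		}
+//	}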
+func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string, oldMappings *idtools.IDMappings) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir, oldMappings, newMappings) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go new file mode 100644 index 00000000000..a3addebe690 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go @@ -0,0 +1,401 @@ +package archive + +import ( + "bytes" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// walker is used to implement collectFileInfoForChanges on linux. 
Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo + idmap1 *idtools.IDMappings + idmap2 *idtools.IDMappings +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. +func collectFileInfoForChanges(dir1, dir2 string, idmap1, idmap2 *idtools.IDMappings) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(idmap1), + root2: newRootFileInfo(idmap2), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: root.idMappings, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, err = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + if err != nil && !errors.Is(err, system.EOPNOTSUPP) { + return err + } + xattrs, err := system.Llistxattr(cpath) + if err != nil && !errors.Is(err, system.EOPNOTSUPP) { + return err + } + for _, key := range xattrs { + if strings.HasPrefix(key, "user.") { + value, err := system.Lgetxattr(cpath, key) + if err != nil { + if errors.Is(err, system.E2BIG) { + logrus.Errorf("archive: Skipping xattr for file %s since value is too big: %s", cpath, key) + continue + } + return err + } + if info.xattrs == nil { + info.xattrs = make(map[string]string) + } + info.xattrs[key] = string(value) + } + } + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. 
For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. + var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. 
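+// e.g. a directory holding "b" (inode 42) and "a" (inode 7) comes back
+// sorted by name as [{a 7} {b 42}] (inode numbers illustrative).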
+func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + builder := make([]byte, 0, dirent.Reclen) + for i := 0; i < len(dirent.Name); i++ { + if dirent.Name[i] == 0 { + break + } + builder = append(builder, byte(dirent.Name[i])) + } + name := string(builder) + if name == "." || name == ".." { // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + dc := func(root, path string, fi os.FileInfo) (string, error) { + return overlayDeletedFile(layers, root, path, fi) + } + return changes(layers, rw, dc, nil, overlayLowerContainsWhiteout) +} + +func overlayLowerContainsWhiteout(root, path string) (bool, error) { + // Whiteout for a file or directory has the same name, but is for a character + // device with major/minor of 0/0. + stat, err := os.Stat(filepath.Join(root, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return false, err + } + if err == nil && stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return true, nil + } + } + return false, nil +} + +func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (string, error) { + // If it's a whiteout item, then a file or directory with that name is removed by this layer. + if fi.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(fi) { + return path, nil + } + } + // After this we only need to pay attention to directories. + if !fi.IsDir() { + return "", nil + } + // If the directory isn't marked as opaque, then it's just a normal directory. + opaque, err := system.Lgetxattr(filepath.Join(root, path), getOverlayOpaqueXattrName()) + if err != nil { + return "", err + } + if len(opaque) != 1 || opaque[0] != 'y' { + return "", err + } + // If there are no lower layers, then it can't have been deleted and recreated in this layer. + if len(layers) == 0 { + return "", err + } + // At this point, we have a directory that's opaque. If it appears in one of the lower + // layers, then it was newly-created here, so it wasn't also deleted here. + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. 
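+			// (an unexpected stat failure, e.g. a permission error,
+			// aborts the scan instead of guessing at the layer state)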
+ return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return "", nil + } + } + // It's not whiteout, so it was there in the older layer, so it has to be + // marked as deleted in this layer. + return path, nil + } + for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) { + // Check for whiteout for a parent directory. + stat, err := os.Stat(filepath.Join(layer, dir)) + if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) { + // Not sure what happened here. + return "", err + } + if err == nil { + if stat.Mode()&os.ModeCharDevice != 0 { + if isWhiteOut(stat) { + return "", nil + } + } + } + } + } + + // We didn't find the same path in any older layers, so it was new in this one. + return "", nil + +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go new file mode 100644 index 00000000000..8769f2291b6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -0,0 +1,101 @@ +//go:build !linux +// +build !linux + +package archive + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtools.IDMappings) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir, oldIDMap) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir, newIDMap) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) { + root := newRootFileInfo(idMappings) + + err := filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
+ if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + idMappings: idMappings, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go new file mode 100644 index 00000000000..1cc1910f897 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go @@ -0,0 +1,50 @@ +// +build !windows + +package archive + +import ( + "os" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "golang.org/x/sys/unix" +) + +func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { + // Don't look at size for dirs, its not a good measure of change + oldUID, oldGID := oldStat.UID(), oldStat.GID() + uid, gid := newStat.UID(), newStat.GID() + if cuid, cgid, err := newInfo.idMappings.ToContainer(idtools.IDPair{UID: int(uid), GID: int(gid)}); err == nil { + uid = uint32(cuid) + gid = uint32(cgid) + if oldInfo != nil { + if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUID), GID: int(oldGID)}); err == nil { + oldUID = uint32(oldcuid) + oldGID = uint32(oldcgid) + } + } + } + ownerChanged := uid != oldUID || gid != oldGID + if oldStat.Mode() != newStat.Mode() || + ownerChanged || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size for dirs, its not a good measure of change + (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return fi.Sys().(*syscall.Stat_t).Ino +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go new file mode 100644 index 00000000000..966400e5940 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go @@ -0,0 +1,30 @@ +package archive + +import ( + "os" + + "github.com/containers/storage/pkg/system" +) + +func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool { + + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mtim() != newStat.Mtim() || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode().IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git 
a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go new file mode 100644 index 00000000000..6298a674d49 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy.go @@ -0,0 +1,460 @@ +package archive + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. 
TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". +func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. 
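+	// For instance (editor's illustration, paths assumed): with dstPath
+	// /data/out where "out" is a symlink to /mnt/out, the destination
+	// resolves to /mnt/out, whereas the same path used as a copy *source*
+	// would be archived as the link itself unless followLink is set.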
+ // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !filepath.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. 
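+		// e.g. copying directory /src/app to a non-existent /dst/web
+		// extracts into /dst, rebasing entries from "app/..." to
+		// "web/..." (paths illustrative).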
+ if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. It this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } + +} + +// RebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurrence of oldBase with newBase at the beginning of entry names. +func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + if hdr.Typeflag == tar.TypeLink { + hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) + } + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string, followLink bool) error { + var ( + srcInfo CopyInfo + err error + ) + + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. 
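+	// Callers usually reach here via CopyResource above, e.g. (editor's
+	// sketch, paths assumed):
+	//
+	//	err := CopyResource("/host/config.yaml", "/ctr/etc/", true)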
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides which real path needs to be copied, based on
+// whether symbolic links should be followed. If followLink is true,
+// resolvedPath is the link target of any symbolic link file; otherwise only
+// symbolic links in the parent directory are resolved, and a symbolic link at
+// the final path element is returned as-is, without resolving it.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+	if followLink {
+		resolvedPath, err = filepath.EvalSymlinks(path)
+		if err != nil {
+			return
+		}
+
+		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+	} else {
+		dirPath, basePath := filepath.Split(path)
+
+		// if not following symbolic links, resolve only the symbolic links of the parent dir
+		var resolvedDirPath string
+		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+		if err != nil {
+			return
+		}
+		// resolvedDirPath will have been cleaned (no trailing path separators) so
+		// we can manually join it with the base path element.
+		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+			rebaseName = filepath.Base(path)
+		}
+	}
+	return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, returning the
+// completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// linkTarget will have been cleaned (no trailing path separators and dot) so
+	// we can manually join it with them
+	var rebaseName string
+	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+		resolvedPath += string(filepath.Separator) + "."
+	}
+
+	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+		resolvedPath += string(filepath.Separator)
+	}
+
+	if filepath.Base(path) != filepath.Base(resolvedPath) {
+		// In the case where the path had a trailing separator and a symlink
+		// evaluation has changed the last path component, we will need to
+		// rebase the name in the archive that is being copied to match the
+		// originally requested name.
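+	// Editorial sketch (not part of the vendored upstream source; paths are
+	// hypothetical): assume /tmp/link is a symlink to /tmp/realdir. The
+	// archive entries will be named "realdir/...", but the caller asked for
+	// "link/", so a rebase name is produced:
+	//
+	//	resolved, rebase, _ := ResolveHostSourcePath("/tmp/link/", true)
+	//	// resolved == "/tmp/realdir/", rebase == "link"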
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go new file mode 100644 index 00000000000..e305b5e4af9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_windows.go b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go new file mode 100644 index 00000000000..2b775b45c4f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go new file mode 100644 index 00000000000..ca8832fe421 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -0,0 +1,258 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/fs" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/pools" + "github.com/containers/storage/pkg/system" + "github.com/sirupsen/logrus" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + buffer := make([]byte, 1<<20) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. 
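+		// Editorial sketch (not part of the vendored upstream source; paths
+		// are hypothetical): typical use of the function being defined here,
+		// applying one uncompressed layer and getting its unpacked size back:
+		//
+		//	f, err := os.Open("/tmp/layer.tar")
+		//	if err != nil {
+		//		return 0, err
+		//	}
+		//	defer f.Close()
+		//	size, err := UnpackLayer("/var/lib/rootfs", f, nil)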
+		if runtime.GOOS == windows {
+			if strings.Contains(hdr.Name, ":") {
+				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+				continue
+			}
+		}
+
+		// Note as these operations are platform specific, so must the slash be.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists.
+			// This happened in some tests where an image had a tarfile without any
+			// parent directories.
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = os.MkdirAll(parentPath, 0600)
+				if err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
+			// We don't want this directory, but we need the files in them so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = ioutil.TempDir("", "storageplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != WhiteoutOpaqueDir {
+				continue
+			}
+		}
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return 0, err
+		}
+
+		// Note as these operations are platform specific, so must the slash be.
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
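+			// Editorial summary of the whiteout handling above (hypothetical
+			// entry names, not part of the vendored upstream source):
+			//
+			//	".wh.app.log"        -> os.RemoveAll(dir/app.log); no file extracted
+			//	".wh..wh..opq"       -> remove everything in dir not yet unpacked
+			//	".wh..wh.plnk/2.123" -> stashed in aufsTempdir as a hardlink target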
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(nil, idMappings, options.ChownOpts, srcHdr); err != nil { + return 0, err + } + + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. 
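+// Editorial sketch (not part of the vendored upstream source): ApplyLayer
+// above and ApplyUncompressedLayer below differ only in the decompress flag
+// they pass to applyLayerHandler:
+//
+//	size, err := ApplyLayer(dest, gzipStream)                          // stream is decompressed first
+//	size, err = ApplyUncompressedLayer(dest, tarStream, &TarOptions{}) // stream used as-is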
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_linux.go b/vendor/github.com/containers/storage/pkg/archive/time_linux.go new file mode 100644 index 00000000000..3448569b1eb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go new file mode 100644 index 00000000000..e85aac05408 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/containers/storage/pkg/archive/whiteouts.go b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go new file mode 100644 index 00000000000..d20478a10dc --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. 
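+// Editorial note (not part of the vendored upstream source): the whiteout
+// constants in this file compose mechanically, e.g.:
+//
+//	WhiteoutPrefix     // ".wh."
+//	WhiteoutMetaPrefix // ".wh..wh."
+//	WhiteoutLinkDir    // ".wh..wh.plnk"
+//	WhiteoutOpaqueDir  // ".wh..wh..opq" (declared below)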
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
diff --git a/vendor/github.com/containers/storage/pkg/archive/wrap.go b/vendor/github.com/containers/storage/pkg/archive/wrap.go
new file mode 100644
index 00000000000..b39d12c8780
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+	files := parseStringPairs(input...)
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, file := range files {
+		name, content := file[0], file[1]
+		hdr := &tar.Header{
+			Name: name,
+			Size: int64(len(content)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return nil, err
+		}
+		if _, err := tw.Write([]byte(content)); err != nil {
+			return nil, err
+		}
+	}
+	if err := tw.Close(); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+	output = make([][2]string, 0, len(input)/2+1)
+	for i := 0; i < len(input); i += 2 {
+		var pair [2]string
+		pair[0] = input[i]
+		if i+1 < len(input) {
+			pair[1] = input[i+1]
+		}
+		output = append(output, pair)
+	}
+	return
+}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
new file mode 100644
index 00000000000..e874eb74e05
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -0,0 +1,186 @@
+package chrootarchive
+
+import (
+	stdtar "archive/tar"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"os/user"
+	"path/filepath"
+	"sync"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/opencontainers/runc/libcontainer/userns"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	// initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
+	// environment not in the chroot from untrusted files.
+	_, _ = user.Lookup("storage")
+	_, _ = net.LookupHost("localhost")
+}
+
+// NewArchiver returns a new Archiver which uses chrootarchive.Untar
+func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
+	archiver := archive.NewArchiver(idMappings)
+	archiver.Untar = Untar
+	return archiver
+}
+
+// NewArchiverWithChown returns a new Archiver which uses chrootarchive.Untar and the provided ID mapping configuration on both ends
+func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *archive.Archiver {
+	archiver := archive.NewArchiverWithChown(tarIDMappings, chownOpts, untarIDMappings)
+	archiver.Untar = Untar
+	return archiver
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
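+// Editorial usage sketch for Generate from wrap.go above (not part of the
+// vendored upstream source): input is consumed as path/content pairs and a
+// missing final content is treated as empty:
+//
+//	rdr, err := Generate("foo.txt", "hello world", "emptyfile")
+//	// the archive holds "foo.txt" (11 bytes) and "emptyfile" (0 bytes)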
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+	return untarHandler(tarArchive, dest, options, true, dest)
+}
+
+// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory.
+// The root directory is the directory that will be chrooted to.
+// `dest` must be a path within `root`; if it is not, an error will be returned.
+//
+// `root` should be set to a directory which is not controlled by any potentially
+// malicious process.
+//
+// This should be used to prevent a potential attacker from manipulating `dest`
+// such that it would provide access to files outside of `dest` through things
+// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
+// sanitizing symlinks in this manner is inherently racy:
+// ref: CVE-2018-15664
+func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+	return untarHandler(tarArchive, dest, options, true, root)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+	return untarHandler(tarArchive, dest, options, false, dest)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
+	if tarArchive == nil {
+		return fmt.Errorf("Empty archive")
+	}
+	if options == nil {
+		options = &archive.TarOptions{}
+		options.InUserNS = userns.RunningInUserNS()
+	}
+	if options.ExcludePatterns == nil {
+		options.ExcludePatterns = []string{}
+	}
+
+	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+	rootIDs := idMappings.RootPair()
+
+	dest = filepath.Clean(dest)
+	if _, err := os.Stat(dest); os.IsNotExist(err) {
+		if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
+			return err
+		}
+	}
+
+	r := ioutil.NopCloser(tarArchive)
+	if decompress {
+		decompressedArchive, err := archive.DecompressStream(tarArchive)
+		if err != nil {
+			return err
+		}
+		defer decompressedArchive.Close()
+		r = decompressedArchive
+	}
+
+	return invokeUnpack(r, dest, options, root)
+}
+
+// Tar tars the requested path while chrooted to the specified root.
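+// Editorial sketch (not part of the vendored upstream source; paths are
+// hypothetical): pinning extraction with UntarWithRoot above so symlinks
+// inside a container rootfs cannot redirect writes outside of it:
+//
+//	root := "/var/lib/containers/x/rootfs"
+//	err := UntarWithRoot(tarStream, root+"/etc", nil, root)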
+func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + if options == nil { + options = &archive.TarOptions{} + } + return invokePack(srcPath, options, root) +} + +// CopyFileWithTarAndChown returns a function which copies a single file from outside +// of any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + contentReader, contentWriter, err := os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating pipe extract data to %q", dest) + } + defer contentReader.Close() + defer contentWriter.Close() + var hashError error + var hashWorker sync.WaitGroup + hashWorker.Add(1) + go func() { + t := stdtar.NewReader(contentReader) + _, err := t.Next() + if err != nil { + hashError = err + } + if _, err = io.Copy(hasher, t); err != nil && err != io.EOF { + hashError = err + } + hashWorker.Done() + }() + if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil { + err = errors.Wrapf(err, "error extracting data to %q while copying", dest) + } + hashWorker.Wait() + if err == nil { + err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest) + } + return err + } + } + return archiver.CopyFileWithTar +} + +// CopyWithTarAndChown returns a function which copies a directory tree from outside of +// any container into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.CopyWithTar +} + +// UntarPathAndChown returns a function which extracts an archive in a specified +// location into our working container, mapping permissions using the +// container's ID maps, possibly overridden using the passed-in chownOpts +func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error { + untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap) + archiver := NewArchiverWithChown(nil, chownOpts, untarMappings) + if hasher != nil { + originalUntar := archiver.Untar + archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return originalUntar(io.TeeReader(tarArchive, hasher), dest, options) + } + } + return archiver.UntarPath +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go new file mode 100644 index 00000000000..9da10fe33cd --- /dev/null +++ 
b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,207 @@ +// +build !windows + +package chrootarchive + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/reexec" + "github.com/pkg/errors" +) + +// untar is the entry-point for storage-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. +func untar() { + runtime.LockOSThread() + flag.Parse() + + var options archive.TarOptions + + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + dst := flag.Arg(0) + var root string + if len(flag.Args()) > 1 { + root = flag.Arg(1) + } + + if root == "" { + root = dst + } + + if err := chroot(root); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, dst, &options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error { + if root == "" { + return errors.New("must specify a root to chroot to") + } + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + if root != "" { + relDest, err := filepath.Rel(root, dest) + if err != nil { + return err + } + if relDest == "." 
{ + relDest = "/" + } + if relDest[0] != '/' { + relDest = "/" + relDest + } + dest = relDest + } + + cmd := reexec.Command("storage-untar", dest, root) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + w.Close() + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + w.Close() + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | storage-untar ...` failed on storage-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} + +func tar() { + runtime.LockOSThread() + flag.Parse() + + src := flag.Arg(0) + var root string + if len(flag.Args()) > 1 { + root = flag.Arg(1) + } + + if root == "" { + root = src + } + + if err := realChroot(root); err != nil { + fatal(err) + } + + var options archive.TarOptions + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + rdr, err := archive.TarWithOptions(src, &options) + if err != nil { + fatal(err) + } + defer rdr.Close() + + if _, err := io.Copy(os.Stdout, rdr); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + if root == "" { + return nil, errors.New("root path must not be empty") + } + + relSrc, err := filepath.Rel(root, srcPath) + if err != nil { + return nil, err + } + if relSrc == "." 
{ + relSrc = "/" + } + if relSrc[0] != '/' { + relSrc = "/" + relSrc + } + + // make sure we didn't trim a trailing slash with the call to `Rel` + if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") { + relSrc += "/" + } + + cmd := reexec.Command("storage-tar", relSrc, root) + + errBuff := bytes.NewBuffer(nil) + cmd.Stderr = errBuff + + tarR, tarW := io.Pipe() + cmd.Stdout = tarW + + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, errors.Wrap(err, "error getting options pipe for tar process") + } + + if err := cmd.Start(); err != nil { + return nil, errors.Wrap(err, "tar error on re-exec cmd") + } + + go func() { + err := cmd.Wait() + err = errors.Wrapf(err, "error processing tar file: %s", errBuff) + tarW.CloseWithError(err) + }() + + if err := json.NewEncoder(stdin).Encode(options); err != nil { + stdin.Close() + return nil, errors.Wrap(err, "tar json encode to pipe failed") + } + stdin.Close() + + return tarR, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go new file mode 100644 index 00000000000..8a5c680b146 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,29 @@ +package chrootarchive + +import ( + "io" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions, root string) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the pack. We call inline instead within the daemon process. + return archive.TarWithOptions(srcPath, options) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 00000000000..76c94c6c1e9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,114 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/mount" + "github.com/syndtr/gocapability/capability" + "golang.org/x/sys/unix" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. 
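+// Editorial sketch of the pivot_root sequence implemented below (not part of
+// the vendored upstream source; /newroot is hypothetical, cleanup elided):
+//
+//	_ = unix.Unshare(unix.CLONE_NEWNS)       // private mount namespace
+//	_ = mount.MakeRPrivate("/")              // stop mount propagation
+//	pivotDir, _ := ioutil.TempDir("/newroot", ".pivot_root")
+//	_ = unix.PivotRoot("/newroot", pivotDir) // swap old and new roots
+//	_ = unix.Chdir("/")
+//	_ = unix.Unmount("/"+filepath.Base(pivotDir), unix.MNT_DETACH)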
+// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + caps, err := capability.NewPid(0) + if err != nil { + return err + } + + // if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot + if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) { + return realChroot(path) + } + + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + // make everything in new ns private + if err := mount.MakeRPrivate("/"); err != nil { + return err + } + + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := unix.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := unix.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 00000000000..83278ee5051 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,16 @@ +// +build !windows,!linux + +package chrootarchive + +import "golang.org/x/sys/unix" + +func 
realChroot(path string) error { + if err := unix.Chroot(path); err != nil { + return err + } + return unix.Chdir("/") +} + +func chroot(path string) error { + return realChroot(path) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go new file mode 100644 index 00000000000..68b8f74f775 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package chrootarchive + +import ( + "io" + + "github.com/containers/storage/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can only be +// uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go new file mode 100644 index 00000000000..84253c6aa9b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,129 @@ +//+build !windows + +package chrootarchive + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/reexec" + "github.com/containers/storage/pkg/system" + "github.com/opencontainers/runc/libcontainer/userns" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for storage-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. +func applyLayer() { + + var ( + tmpDir string + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := userns.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
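+// Editorial sketch (not part of the vendored upstream source) of the re-exec
+// protocol between applyLayerHandler below and the applyLayer entry point
+// above: options travel in the OPT environment variable, the layer tar on
+// stdin, and the result comes back as JSON on stdout:
+//
+//	parent: cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", optionsJSON))
+//	child:  chroot(dest); size, _ := archive.UnpackLayer("/", os.Stdin, options)
+//	child stdout: {"layerSize": 12345}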
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if userns.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("storage-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go new file mode 100644 index 00000000000..8f8e88bfbea --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. 
%s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + } + + return s, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go new file mode 100644 index 00000000000..ea08135e4d5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go @@ -0,0 +1,29 @@ +// +build !windows + +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/containers/storage/pkg/reexec" +) + +func init() { + reexec.Register("storage-applyLayer", applyLayer) + reexec.Register("storage-untar", untar) + reexec.Register("storage-tar", tar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go new file mode 100644 index 00000000000..fa17c9bf831 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go new file mode 100644 index 00000000000..63f9704564c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/jsoniter.go @@ -0,0 +1,5 @@ +package chrootarchive + +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go new file mode 100644 index 00000000000..b8b278a1329 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -0,0 +1,627 @@ +package chunked + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + "unsafe" + + storage "github.com/containers/storage" + "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/ioutils" + jsoniter "github.com/json-iterator/go" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + cacheKey = "chunked-manifest-cache" + cacheVersion = 1 +) + +type metadata struct { + tagLen int + digestLen int + tags []byte + vdata []byte +} + +type layer struct { + id string + metadata *metadata + target string +} + +type layersCache struct { + layers []layer + refs int + store storage.Store + mutex sync.RWMutex + created time.Time +} + +var cacheMutex sync.Mutex +var cache *layersCache + +func (c *layersCache) release() { + cacheMutex.Lock() + defer cacheMutex.Unlock() + + c.refs-- + if c.refs == 0 { + cache = nil + } +} + +func getLayersCacheRef(store storage.Store) *layersCache { + cacheMutex.Lock() + defer cacheMutex.Unlock() + if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 { + cache.refs++ + return cache + } + cache := &layersCache{ + store: store, + refs: 1, + created: time.Now(), + } + return cache +} + +func getLayersCache(store storage.Store) (*layersCache, error) { + 
c := getLayersCacheRef(store)
+
+	if err := c.load(); err != nil {
+		c.release()
+		return nil, err
+	}
+	return c, nil
+}
+
+func (c *layersCache) load() error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	allLayers, err := c.store.Layers()
+	if err != nil {
+		return err
+	}
+	existingLayers := make(map[string]string)
+	for _, r := range c.layers {
+		existingLayers[r.id] = r.target
+	}
+
+	currentLayers := make(map[string]string)
+	for _, r := range allLayers {
+		currentLayers[r.ID] = r.ID
+		if _, found := existingLayers[r.ID]; found {
+			continue
+		}
+
+		bigData, err := c.store.LayerBigData(r.ID, cacheKey)
+		// if the cache already exists, read and use it
+		if err == nil {
+			defer bigData.Close()
+			metadata, err := readMetadataFromCache(bigData)
+			if err == nil {
+				c.addLayer(r.ID, metadata)
+				continue
+			}
+			logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err)
+		} else if errors.Cause(err) != os.ErrNotExist {
+			return err
+		}
+
+		// otherwise create it from the layer TOC.
+		manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey)
+		if err != nil {
+			continue
+		}
+		defer manifestReader.Close()
+
+		manifest, err := ioutil.ReadAll(manifestReader)
+		if err != nil {
+			return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
+		}
+
+		metadata, err := writeCache(manifest, r.ID, c.store)
+		if err == nil {
+			c.addLayer(r.ID, metadata)
+		}
+	}
+
+	var newLayers []layer
+	for _, l := range c.layers {
+		if _, found := currentLayers[l.id]; found {
+			newLayers = append(newLayers, l)
+		}
+	}
+	c.layers = newLayers
+
+	return nil
+}
+
+// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file
+// is usable for deduplication with hardlinks.
+// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs.
+func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) {
+	digester := digest.Canonical.Digester()
+
+	modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode)
+	hash := digester.Hash()
+
+	if _, err := hash.Write([]byte(f.Digest)); err != nil {
+		return "", err
+	}
+
+	if _, err := hash.Write([]byte(modeString)); err != nil {
+		return "", err
+	}
+
+	if len(f.Xattrs) > 0 {
+		keys := make([]string, 0, len(f.Xattrs))
+		for k := range f.Xattrs {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+
+		for _, k := range keys {
+			if _, err := hash.Write([]byte(k)); err != nil {
+				return "", err
+			}
+			if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil {
+				return "", err
+			}
+		}
+	}
+	return string(digester.Digest()), nil
+}
+
+// generateFileLocation generates a file location in the form $OFFSET@$PATH
+func generateFileLocation(path string, offset uint64) []byte {
+	return []byte(fmt.Sprintf("%d@%s", offset, path))
+}
+
+// generateTag generates a tag in the form $DIGEST$OFFSET@LEN.
+// the [OFFSET; LEN] points to the variable length data where the file locations
+// are stored. $DIGEST has length digestLen stored in the metadata file header.
+func generateTag(digest string, offset, len uint64) string {
+	return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len)
+}
+
+type setBigData interface {
+	// SetLayerBigData stores a (possibly large) chunk of named data
+	SetLayerBigData(id, key string, data io.Reader) error
+}
+
+// writeCache writes a cache for the layer ID.
+// It generates a sorted list of digests, each with the offset and length of
+// its file location entry in the variable length data.
+// The same cache is used to look up files, chunks and candidates for deduplication with hard links.
+// There are 3 kind of digests stored: +// - digest(file.payload)) +// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs) +// - digest(i) for each i in chunks(file payload) +func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) { + var vdata bytes.Buffer + tagLen := 0 + digestLen := 0 + var tagsBuffer bytes.Buffer + + toc, err := prepareMetadata(manifest) + if err != nil { + return nil, err + } + + var tags []string + for _, k := range toc { + if k.Digest != "" { + location := generateFileLocation(k.Name, 0) + + off := uint64(vdata.Len()) + l := uint64(len(location)) + + d := generateTag(k.Digest, off, l) + if tagLen == 0 { + tagLen = len(d) + } + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + fp, err := calculateHardLinkFingerprint(k) + if err != nil { + return nil, err + } + d = generateTag(fp, off, l) + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + if _, err := vdata.Write(location); err != nil { + return nil, err + } + + digestLen = len(k.Digest) + } + if k.ChunkDigest != "" { + location := generateFileLocation(k.Name, uint64(k.ChunkOffset)) + off := uint64(vdata.Len()) + l := uint64(len(location)) + d := generateTag(k.ChunkDigest, off, l) + if tagLen == 0 { + tagLen = len(d) + } + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + if _, err := vdata.Write(location); err != nil { + return nil, err + } + digestLen = len(k.ChunkDigest) + } + } + + sort.Strings(tags) + + for _, t := range tags { + if _, err := tagsBuffer.Write([]byte(t)); err != nil { + return nil, err + } + } + + pipeReader, pipeWriter := io.Pipe() + errChan := make(chan error, 1) + go func() { + defer pipeWriter.Close() + defer close(errChan) + + // version + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil { + errChan <- err + return + } + + // len of a tag + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil { + errChan <- err + return + } + + // len of a digest + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil { + errChan <- err + return + } + + // tags length + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil { + errChan <- err + return + } + + // vdata length + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil { + errChan <- err + return + } + + // tags + if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil { + errChan <- err + return + } + + // variable length data + if _, err := pipeWriter.Write(vdata.Bytes()); err != nil { + errChan <- err + return + } + + errChan <- nil + }() + defer pipeReader.Close() + + counter := ioutils.NewWriteCounter(ioutil.Discard) + + r := io.TeeReader(pipeReader, counter) + + if err := dest.SetLayerBigData(id, cacheKey, r); err != nil { + return nil, err + } + + if err := <-errChan; err != nil { + return nil, err + } + + logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count) + + return &metadata{ + digestLen: digestLen, + tagLen: tagLen, + tags: tagsBuffer.Bytes(), + vdata: vdata.Bytes(), + }, nil +} + +func readMetadataFromCache(bigData io.Reader) (*metadata, error) { + var version, tagLen, digestLen, tagsLen, vdataLen uint64 + if err := binary.Read(bigData, binary.LittleEndian, &version); 
err != nil { + return nil, err + } + if version != cacheVersion { + return nil, nil + } + if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil { + return nil, err + } + + tags := make([]byte, tagsLen) + if _, err := bigData.Read(tags); err != nil { + return nil, err + } + + vdata := make([]byte, vdataLen) + if _, err := bigData.Read(vdata); err != nil { + return nil, err + } + + return &metadata{ + tagLen: int(tagLen), + digestLen: int(digestLen), + tags: tags, + vdata: vdata, + }, nil +} + +func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) { + toc, err := unmarshalToc(manifest) + if err != nil { + // ignore errors here. They might be caused by a different manifest format. + return nil, nil + } + + var r []*internal.FileMetadata + chunkSeen := make(map[string]bool) + for i := range toc.Entries { + d := toc.Entries[i].Digest + if d != "" { + r = append(r, &toc.Entries[i]) + continue + } + + // chunks do not use hard link dedup so keeping just one candidate is enough + cd := toc.Entries[i].ChunkDigest + if cd != "" && !chunkSeen[cd] { + r = append(r, &toc.Entries[i]) + chunkSeen[cd] = true + } + } + return r, nil +} + +func (c *layersCache) addLayer(id string, metadata *metadata) error { + target, err := c.store.DifferTarget(id) + if err != nil { + return fmt.Errorf("get checkout directory layer %q: %w", id, err) + } + + l := layer{ + id: id, + metadata: metadata, + target: target, + } + c.layers = append(c.layers, l) + return nil +} + +func byteSliceAsString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func findTag(digest string, metadata *metadata) (string, uint64, uint64) { + if len(digest) != metadata.digestLen { + return "", 0, 0 + } + + nElements := len(metadata.tags) / metadata.tagLen + + i := sort.Search(nElements, func(i int) bool { + d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen]) + return strings.Compare(d, digest) >= 0 + }) + if i < nElements { + d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)]) + if digest == d { + startOff := i*metadata.tagLen + metadata.digestLen + parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@") + off, _ := strconv.ParseInt(parts[0], 10, 64) + len, _ := strconv.ParseInt(parts[1], 10, 64) + return digest, uint64(off), uint64(len) + } + } + return "", 0, 0 +} + +func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) { + if digest == "" { + return "", "", -1, nil + } + + c.mutex.RLock() + defer c.mutex.RUnlock() + + for _, layer := range c.layers { + digest, off, len := findTag(digest, layer.metadata) + if digest != "" { + position := string(layer.metadata.vdata[off : off+len]) + parts := strings.SplitN(position, "@", 2) + offFile, _ := strconv.ParseInt(parts[0], 10, 64) + return layer.target, parts[1], offFile, nil + } + } + + return "", "", -1, nil +} + +// findFileInOtherLayers finds the specified file in other layers. +// file is the file to look for. 
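+// Editorial note (not part of the vendored upstream source): generateTag
+// zero-pads offset and length to 20 digits, so every tag for a given digest
+// algorithm has identical length; findTag above exploits that fixed width to
+// binary-search the flat tags buffer:
+//
+//	tag := generateTag("sha256:...", 0, 17)
+//	// "sha256:...00000000000000000000@00000000000000000017"
+//	i := sort.Search(len(tags)/tagLen, func(i int) bool {
+//		return string(tags[i*tagLen:i*tagLen+digestLen]) >= digest
+//	})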
+func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) { + digest := file.Digest + if useHardLinks { + var err error + digest, err = calculateHardLinkFingerprint(file) + if err != nil { + return "", "", err + } + } + target, name, off, err := c.findDigestInternal(digest) + if off == 0 { + return target, name, err + } + return "", "", nil +} + +func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) { + return c.findDigestInternal(chunk.ChunkDigest) +} + +func unmarshalToc(manifest []byte) (*internal.TOC, error) { + var buf bytes.Buffer + count := 0 + var toc internal.TOC + + iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + if field != "entries" { + iter.Skip() + continue + } + for iter.ReadArray() { + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch field { + case "type", "name", "linkName", "digest", "chunkDigest", "chunkType": + count += len(iter.ReadStringAsSlice()) + case "xattrs": + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + count += len(iter.ReadStringAsSlice()) + } + default: + iter.Skip() + } + } + } + break + } + + buf.Grow(count) + + getString := func(b []byte) string { + from := buf.Len() + buf.Write(b) + to := buf.Len() + return byteSliceAsString(buf.Bytes()[from:to]) + } + + iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + if field == "version" { + toc.Version = iter.ReadInt() + continue + } + if field != "entries" { + iter.Skip() + continue + } + for iter.ReadArray() { + var m internal.FileMetadata + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch field { + case "type": + m.Type = getString(iter.ReadStringAsSlice()) + case "name": + m.Name = getString(iter.ReadStringAsSlice()) + case "linkName": + m.Linkname = getString(iter.ReadStringAsSlice()) + case "mode": + m.Mode = iter.ReadInt64() + case "size": + m.Size = iter.ReadInt64() + case "UID": + m.UID = iter.ReadInt() + case "GID": + m.GID = iter.ReadInt() + case "ModTime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.ModTime = &time + case "accesstime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.AccessTime = &time + case "changetime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.ChangeTime = &time + case "devMajor": + m.Devmajor = iter.ReadInt64() + case "devMinor": + m.Devminor = iter.ReadInt64() + case "digest": + m.Digest = getString(iter.ReadStringAsSlice()) + case "offset": + m.Offset = iter.ReadInt64() + case "endOffset": + m.EndOffset = iter.ReadInt64() + case "chunkSize": + m.ChunkSize = iter.ReadInt64() + case "chunkOffset": + m.ChunkOffset = iter.ReadInt64() + case "chunkDigest": + m.ChunkDigest = getString(iter.ReadStringAsSlice()) + case "chunkType": + m.ChunkType = getString(iter.ReadStringAsSlice()) + case "xattrs": + m.Xattrs = make(map[string]string) + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + value := iter.ReadStringAsSlice() + m.Xattrs[key] = getString(value) + } + default: + iter.Skip() + } + } + toc.Entries = append(toc.Entries, m) + } + break + } + 
toc.StringsBuf = buf + return &toc, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression.go b/vendor/github.com/containers/storage/pkg/chunked/compression.go new file mode 100644 index 00000000000..96254bc4e54 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compression.go @@ -0,0 +1,277 @@ +package chunked + +import ( + archivetar "archive/tar" + "bytes" + "encoding/binary" + "fmt" + "io" + "strconv" + + "github.com/containerd/stargz-snapshotter/estargz" + "github.com/containers/storage/pkg/chunked/compressor" + "github.com/containers/storage/pkg/chunked/internal" + "github.com/klauspost/compress/zstd" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/vbatts/tar-split/archive/tar" +) + +const ( + TypeReg = internal.TypeReg + TypeChunk = internal.TypeChunk + TypeLink = internal.TypeLink + TypeChar = internal.TypeChar + TypeBlock = internal.TypeBlock + TypeDir = internal.TypeDir + TypeFifo = internal.TypeFifo + TypeSymlink = internal.TypeSymlink +) + +var typesToTar = map[string]byte{ + TypeReg: tar.TypeReg, + TypeLink: tar.TypeLink, + TypeChar: tar.TypeChar, + TypeBlock: tar.TypeBlock, + TypeDir: tar.TypeDir, + TypeFifo: tar.TypeFifo, + TypeSymlink: tar.TypeSymlink, +} + +func typeToTarType(t string) (byte, error) { + r, found := typesToTar[t] + if !found { + return 0, fmt.Errorf("unknown type: %v", t) + } + return r, nil +} + +func isZstdChunkedFrameMagic(data []byte) bool { + if len(data) < 8 { + return false + } + return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8]) +} + +func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { + // information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md + footerSize := int64(51) + if blobSize <= footerSize { + return nil, 0, errors.New("blob too small") + } + chunk := ImageSourceChunk{ + Offset: uint64(blobSize - footerSize), + Length: uint64(footerSize), + } + parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, 0, err + } + defer reader.Close() + footer := make([]byte, footerSize) + if _, err := io.ReadFull(reader, footer); err != nil { + return nil, 0, err + } + + /* Read the ToC offset: + - 10 bytes gzip header + - 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) + - 2 bytes Extra: SI1 = 'S', SI2 = 'G' + - 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) + - 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) + - 5 bytes flate header: BFINAL = 1(last block), BTYPE = 0(non-compressed block), LEN = 0 + - 8 bytes gzip footer + */ + tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64) + if err != nil { + return nil, 0, errors.Wrap(err, "parse ToC offset") + } + + size := int64(blobSize - footerSize - tocOffset) + // set a reasonable limit + if size > (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + + chunk = ImageSourceChunk{ + Offset: uint64(tocOffset), + Length: uint64(size), + } + parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + + var tocReader io.ReadCloser + select { + case r := <-parts: + tocReader = r + case err := <-errs: + return nil, 0, err + } + defer 
tocReader.Close() + + r, err := pgzip.NewReader(tocReader) + if err != nil { + return nil, 0, err + } + defer r.Close() + + aTar := archivetar.NewReader(r) + + header, err := aTar.Next() + if err != nil { + return nil, 0, err + } + // set a reasonable limit + if header.Size > (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + + manifestUncompressed := make([]byte, header.Size) + if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil { + return nil, 0, err + } + + manifestDigester := digest.Canonical.Digester() + manifestChecksum := manifestDigester.Hash() + if _, err := manifestChecksum.Write(manifestUncompressed); err != nil { + return nil, 0, err + } + + d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) + if err != nil { + return nil, 0, err + } + if manifestDigester.Digest() != d { + return nil, 0, errors.New("invalid manifest checksum") + } + + return manifestUncompressed, tocOffset, nil +} + +// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must +// be specified. +// This function uses the io.containers.zstd-chunked. annotations when specified. +func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { + footerSize := int64(internal.FooterSizeSupported) + if blobSize <= footerSize { + return nil, 0, errors.New("blob too small") + } + + manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey] + if manifestChecksumAnnotation == "" { + return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey) + } + + var offset, length, lengthUncompressed, manifestType uint64 + + if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" { + if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil { + return nil, 0, err + } + } else { + chunk := ImageSourceChunk{ + Offset: uint64(blobSize - footerSize), + Length: uint64(footerSize), + } + parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, 0, err + } + footer := make([]byte, footerSize) + if _, err := io.ReadFull(reader, footer); err != nil { + return nil, 0, err + } + + offset = binary.LittleEndian.Uint64(footer[0:8]) + length = binary.LittleEndian.Uint64(footer[8:16]) + lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24]) + manifestType = binary.LittleEndian.Uint64(footer[24:32]) + if !isZstdChunkedFrameMagic(footer[32:40]) { + return nil, 0, errors.New("invalid magic number") + } + } + + if manifestType != internal.ManifestTypeCRFS { + return nil, 0, errors.New("invalid manifest type") + } + + // set a reasonable limit + if length > (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + if lengthUncompressed > (1<<20)*50 { + return nil, 0, errors.New("manifest too big") + } + + chunk := ImageSourceChunk{ + Offset: offset, + Length: length, + } + + parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) + if err != nil { + return nil, 0, err + } + var reader io.ReadCloser + select { + case r := <-parts: + reader = r + case err := <-errs: + return nil, 0, err + } + + manifest := make([]byte, length) + if _, err := io.ReadFull(reader, manifest); err != nil { + return nil, 0, err + } + + manifestDigester := 
digest.Canonical.Digester() + manifestChecksum := manifestDigester.Hash() + if _, err := manifestChecksum.Write(manifest); err != nil { + return nil, 0, err + } + + d, err := digest.Parse(manifestChecksumAnnotation) + if err != nil { + return nil, 0, err + } + if manifestDigester.Digest() != d { + return nil, 0, errors.New("invalid manifest checksum") + } + + decoder, err := zstd.NewReader(nil) + if err != nil { + return nil, 0, err + } + defer decoder.Close() + + b := make([]byte, 0, lengthUncompressed) + if decoded, err := decoder.DecodeAll(manifest, b); err == nil { + return decoded, int64(offset), nil + } + + return manifest, int64(offset), nil +} + +// ZstdCompressor is a CompressorFunc for the zstd compression algorithm. +// Deprecated: Use pkg/chunked/compressor.ZstdCompressor. +func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + return compressor.ZstdCompressor(r, metadata, level) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage.go b/vendor/github.com/containers/storage/pkg/chunked/storage.go new file mode 100644 index 00000000000..9212cbbcff8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/storage.go @@ -0,0 +1,26 @@ +package chunked + +import ( + "fmt" + "io" +) + +// ImageSourceChunk is a portion of a blob. +type ImageSourceChunk struct { + Offset uint64 + Length uint64 +} + +// ImageSourceSeekable is an image source that permits to fetch chunks of the entire blob. +type ImageSourceSeekable interface { + // GetBlobAt returns a stream for the specified blob. + GetBlobAt([]ImageSourceChunk) (chan io.ReadCloser, chan error, error) +} + +// ErrBadRequest is returned when the request is not valid +type ErrBadRequest struct { +} + +func (e ErrBadRequest) Error() string { + return fmt.Sprintf("bad request") +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go new file mode 100644 index 00000000000..7de20feaaa0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -0,0 +1,1774 @@ +package chunked + +import ( + archivetar "archive/tar" + "context" + "encoding/base64" + "fmt" + "hash" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/containerd/stargz-snapshotter/estargz" + storage "github.com/containers/storage" + graphdriver "github.com/containers/storage/drivers" + driversCopy "github.com/containers/storage/drivers/copy" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/types" + securejoin "github.com/cyphar/filepath-securejoin" + "github.com/klauspost/compress/zstd" + "github.com/klauspost/pgzip" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/archive/tar" + "golang.org/x/sys/unix" +) + +const ( + maxNumberMissingChunks = 1024 + newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY) + containersOverrideXattr = "user.containers.override_stat" + bigDataKey = "zstd-chunked-manifest" + + fileTypeZstdChunked = iota + fileTypeEstargz + fileTypeNoCompression + fileTypeHole + + copyGoRoutines = 32 +) + +type compressedFileType int + +type chunkedDiffer struct { + stream ImageSourceSeekable + manifest 
[]byte + layersCache *layersCache + tocOffset int64 + fileType compressedFileType + + copyBuffer []byte + + gzipReader *pgzip.Reader + zstdReader *zstd.Decoder + rawReader io.Reader +} + +var xattrsToIgnore = map[string]interface{}{ + "security.selinux": true, +} + +func timeToTimespec(time *time.Time) (ts unix.Timespec) { + if time == nil || time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return unix.NsecToTimespec(time.UnixNano()) +} + +func doHardLink(srcFd int, destDirFd int, destBase string) error { + doLink := func() error { + // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses + // /proc/self/fd doesn't and can be used with rootless. + srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd) + return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW) + } + + err := doLink() + + // if the destination exists, unlink it first and try again + if err != nil && os.IsExist(err) { + unix.Unlinkat(destDirFd, destBase, 0) + return doLink() + } + return err +} + +func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) { + src := fmt.Sprintf("/proc/self/fd/%d", srcFd) + st, err := os.Stat(src) + if err != nil { + return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err) + } + + copyWithFileRange, copyWithFileClone := true, true + + if useHardLinks { + destDirPath := filepath.Dir(destFile) + destBase := filepath.Base(destFile) + destDir, err := openFileUnderRoot(destDirPath, dirfd, 0, mode) + if err == nil { + defer destDir.Close() + + err := doHardLink(srcFd, int(destDir.Fd()), destBase) + if err == nil { + return nil, st.Size(), nil + } + } + } + + // If the destination file already exists, we shouldn't blow it away + dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode) + if err != nil { + return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err) + } + + err = driversCopy.CopyRegularToFile(src, dstFile, st, ©WithFileRange, ©WithFileClone) + if err != nil { + dstFile.Close() + return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err) + } + return dstFile, st.Size(), nil +} + +// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. 
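+// The layer format is detected from the annotations alone: the zstd:chunked
+// manifest checksum annotation selects the zstd:chunked differ, the estargz
+// TOC JSON digest annotation selects the estargz differ, and any other blob
+// is rejected so the caller can fall back to a regular pull. A minimal caller
+// sketch (error handling elided, variable names hypothetical):
+//
+//	differ, err := chunked.GetDiffer(ctx, store, blobSize, annotations, seekableSource)
+//	if err != nil {
+//		// not a partial-pull capable blob; fetch it the ordinary way
+//	}
+//	// otherwise hand differ to the graph driver via ApplyDiffWithDiffer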
+func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { + if _, ok := annotations[internal.ManifestChecksumKey]; ok { + return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss) + } + if _, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok { + return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss) + } + return nil, errors.New("blob type not supported for partial retrieval") +} + +func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { + manifest, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations) + if err != nil { + return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) + } + layersCache, err := getLayersCache(store) + if err != nil { + return nil, err + } + + return &chunkedDiffer{ + copyBuffer: makeCopyBuffer(), + stream: iss, + manifest: manifest, + layersCache: layersCache, + tocOffset: tocOffset, + fileType: fileTypeZstdChunked, + }, nil +} + +func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { + manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations) + if err != nil { + return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) + } + layersCache, err := getLayersCache(store) + if err != nil { + return nil, err + } + + return &chunkedDiffer{ + copyBuffer: makeCopyBuffer(), + stream: iss, + manifest: manifest, + layersCache: layersCache, + tocOffset: tocOffset, + fileType: fileTypeEstargz, + }, nil +} + +func makeCopyBuffer() []byte { + return make([]byte, 2<<20) +} + +// copyFileFromOtherLayer copies a file from another layer +// file is the file to look for. +// source is the path to the source layer checkout. +// name is the path to the file to copy in source. +// dirfd is an open file descriptor to the destination root directory. +// useHardLinks defines whether the deduplication can be performed using hard links. +func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { + srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0) + if err != nil { + return false, nil, 0, fmt.Errorf("open source file: %w", err) + } + defer unix.Close(srcDirfd) + + srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0) + if err != nil { + return false, nil, 0, fmt.Errorf("open source file under target rootfs: %w", err) + } + defer srcFile.Close() + + dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks) + if err != nil { + return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err) + } + return true, dstFile, written, nil +} + +// canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile. +// It checks that the two files have the same UID, GID, file mode and xattrs. 
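+// The check is strict because a hard link shares one inode: owner, mode and
+// xattrs are necessarily identical for every link, so content equality alone
+// is not sufficient. For example, two byte-identical binaries shipped as
+// 0755 root:root and 0700 root:root must still end up as separate inodes.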
+func canDedupMetadataWithHardLink(file *internal.FileMetadata, otherFile *internal.FileMetadata) bool { + if file.UID != otherFile.UID { + return false + } + if file.GID != otherFile.GID { + return false + } + if file.Mode != otherFile.Mode { + return false + } + if !reflect.DeepEqual(file.Xattrs, otherFile.Xattrs) { + return false + } + return true +} + +// canDedupFileWithHardLink checks if the specified file can be deduplicated by an +// open file, given its descriptor and stat data. +func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo) bool { + st, ok := s.Sys().(*syscall.Stat_t) + if !ok { + return false + } + + path := fmt.Sprintf("/proc/self/fd/%d", fd) + + listXattrs, err := system.Llistxattr(path) + if err != nil { + return false + } + + xattrs := make(map[string]string) + for _, x := range listXattrs { + v, err := system.Lgetxattr(path, x) + if err != nil { + return false + } + + if _, found := xattrsToIgnore[x]; found { + continue + } + xattrs[x] = string(v) + } + // fill only the attributes used by canDedupMetadataWithHardLink. + otherFile := internal.FileMetadata{ + UID: int(st.Uid), + GID: int(st.Gid), + Mode: int64(st.Mode), + Xattrs: xattrs, + } + return canDedupMetadataWithHardLink(file, &otherFile) +} + +func getFileDigest(f *os.File, buf []byte) (digest.Digest, error) { + digester := digest.Canonical.Digester() + if _, err := io.CopyBuffer(digester.Hash(), f, buf); err != nil { + return "", err + } + return digester.Digest(), nil +} + +// findFileInOSTreeRepos checks whether the requested file already exist in one of the OSTree repo and copies the file content from there if possible. +// file is the file to look for. +// ostreeRepos is a list of OSTree repos. +// dirfd is an open fd to the destination checkout. +// useHardLinks defines whether the deduplication can be performed using hard links. +func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { + digest, err := digest.Parse(file.Digest) + if err != nil { + return false, nil, 0, nil + } + payloadLink := digest.Encoded() + ".payload-link" + if len(payloadLink) < 2 { + return false, nil, 0, nil + } + + for _, repo := range ostreeRepos { + sourceFile := filepath.Join(repo, "objects", payloadLink[:2], payloadLink[2:]) + st, err := os.Stat(sourceFile) + if err != nil || !st.Mode().IsRegular() { + continue + } + if st.Size() != file.Size { + continue + } + fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0) + if err != nil { + return false, nil, 0, nil + } + f := os.NewFile(uintptr(fd), "fd") + defer f.Close() + + // check if the open file can be deduplicated with hard links + if useHardLinks && !canDedupFileWithHardLink(file, fd, st) { + continue + } + + dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks) + if err != nil { + return false, nil, 0, nil + } + return true, dstFile, written, nil + } + // If hard links deduplication was used and it has failed, try again without hard links. + if useHardLinks { + return findFileInOSTreeRepos(file, ostreeRepos, dirfd, false) + } + + return false, nil, 0, nil +} + +// findFileOnTheHost checks whether the requested file already exist on the host and copies the file content from there if possible. +// It is currently implemented to look only at the file with the same path. Ideally it can detect the same content also at different +// paths. +// file is the file to look for. +// dirfd is an open fd to the destination checkout. 
+// useHardLinks defines whether the deduplication can be performed using hard links. +func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool, buf []byte) (bool, *os.File, int64, error) { + sourceFile := filepath.Clean(filepath.Join("/", file.Name)) + if !strings.HasPrefix(sourceFile, "/usr/") { + // limit host deduplication to files under /usr. + return false, nil, 0, nil + } + + st, err := os.Stat(sourceFile) + if err != nil || !st.Mode().IsRegular() { + return false, nil, 0, nil + } + + if st.Size() != file.Size { + return false, nil, 0, nil + } + + fd, err := unix.Open(sourceFile, unix.O_RDONLY|unix.O_NONBLOCK, 0) + if err != nil { + return false, nil, 0, nil + } + + f := os.NewFile(uintptr(fd), "fd") + defer f.Close() + + manifestChecksum, err := digest.Parse(file.Digest) + if err != nil { + return false, nil, 0, err + } + + checksum, err := getFileDigest(f, buf) + if err != nil { + return false, nil, 0, err + } + + if checksum != manifestChecksum { + return false, nil, 0, nil + } + + // check if the open file can be deduplicated with hard links + useHardLinks = useHardLinks && canDedupFileWithHardLink(file, fd, st) + + dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks) + if err != nil { + return false, nil, 0, nil + } + + // calculate the checksum again to make sure the file wasn't modified while it was copied + if _, err := f.Seek(0, 0); err != nil { + dstFile.Close() + return false, nil, 0, err + } + checksum, err = getFileDigest(f, buf) + if err != nil { + dstFile.Close() + return false, nil, 0, err + } + if checksum != manifestChecksum { + dstFile.Close() + return false, nil, 0, nil + } + return true, dstFile, written, nil +} + +// findFileInOtherLayers finds the specified file in other layers. +// cache is the layers cache to use. +// file is the file to look for. +// dirfd is an open file descriptor to the checkout root directory. +// useHardLinks defines whether the deduplication can be performed using hard links. 
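+// It returns whether the file was found, an open handle to the destination
+// file whose content has already been copied (or hard linked) into the
+// checkout, and the number of bytes written.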
+func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { + target, name, err := cache.findFileInOtherLayers(file, useHardLinks) + if err != nil || name == "" { + return false, nil, 0, err + } + return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks) +} + +func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error { + if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 { + return nil + } + + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + for i := range manifest { + if options.ChownOpts != nil { + manifest[i].UID = options.ChownOpts.UID + manifest[i].GID = options.ChownOpts.GID + } else { + pair := idtools.IDPair{ + UID: manifest[i].UID, + GID: manifest[i].GID, + } + var err error + manifest[i].UID, manifest[i].GID, err = idMappings.ToContainer(pair) + if err != nil { + return err + } + } + } + return nil +} + +type originFile struct { + Root string + Path string + Offset int64 +} + +type missingFileChunk struct { + Gap int64 + Hole bool + + File *internal.FileMetadata + + CompressedSize int64 + UncompressedSize int64 +} + +type missingPart struct { + Hole bool + SourceChunk *ImageSourceChunk + OriginFile *originFile + Chunks []missingFileChunk +} + +func (o *originFile) OpenFile() (io.ReadCloser, error) { + srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file: %w", err) + } + defer unix.Close(srcDirfd) + + srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file under target rootfs: %w", err) + } + + if _, err := srcFile.Seek(o.Offset, 0); err != nil { + srcFile.Close() + return nil, err + } + return srcFile, nil +} + +// setFileAttrs sets the file attributes for file given metadata +func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error { + if file == nil || file.Fd() < 0 { + return errors.Errorf("invalid file") + } + fd := int(file.Fd()) + + t, err := typeToTarType(metadata.Type) + if err != nil { + return err + } + + // If it is a symlink, force to use the path + if t == tar.TypeSymlink { + usePath = true + } + + baseName := "" + if usePath { + dirName := filepath.Dir(metadata.Name) + if dirName != "" { + parentFd, err := openFileUnderRoot(dirName, dirfd, unix.O_PATH|unix.O_DIRECTORY, 0) + if err != nil { + return err + } + defer parentFd.Close() + + dirfd = int(parentFd.Fd()) + } + baseName = filepath.Base(metadata.Name) + } + + doChown := func() error { + if usePath { + return unix.Fchownat(dirfd, baseName, metadata.UID, metadata.GID, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.Fchown(fd, metadata.UID, metadata.GID) + } + + doSetXattr := func(k string, v []byte) error { + return unix.Fsetxattr(fd, k, v, 0) + } + + doUtimes := func() error { + ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} + if usePath { + return unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0) + } + + doChmod := func() error { + if usePath { + return unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW) + } + return unix.Fchmod(fd, uint32(mode)) + } + + if err := doChown(); err != nil { + if !options.IgnoreChownErrors { + return 
fmt.Errorf("chown %q to %d:%d: %w", metadata.Name, metadata.UID, metadata.GID, err) + } + } + + canIgnore := func(err error) bool { + return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP) + } + + for k, v := range metadata.Xattrs { + if _, found := xattrsToIgnore[k]; found { + continue + } + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return fmt.Errorf("decode xattr %q: %w", v, err) + } + if err := doSetXattr(k, data); !canIgnore(err) { + return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err) + } + } + + if err := doUtimes(); !canIgnore(err) { + return fmt.Errorf("set utimes for %q: %w", metadata.Name, err) + } + + if err := doChmod(); !canIgnore(err) { + return fmt.Errorf("chmod %q: %w", metadata.Name, err) + } + return nil +} + +func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + root := fmt.Sprintf("/proc/self/fd/%d", dirfd) + + targetRoot, err := os.Readlink(root) + if err != nil { + return -1, err + } + + hasNoFollow := (flags & unix.O_NOFOLLOW) != 0 + + fd := -1 + // If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the + // last component as the path to openat(). + if hasNoFollow { + dirName := filepath.Dir(name) + if dirName != "" { + newRoot, err := securejoin.SecureJoin(root, filepath.Dir(name)) + if err != nil { + return -1, err + } + root = newRoot + } + + parentDirfd, err := unix.Open(root, unix.O_PATH, 0) + if err != nil { + return -1, err + } + defer unix.Close(parentDirfd) + + fd, err = unix.Openat(parentDirfd, filepath.Base(name), int(flags), uint32(mode)) + if err != nil { + return -1, err + } + } else { + newPath, err := securejoin.SecureJoin(root, name) + if err != nil { + return -1, err + } + fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode)) + if err != nil { + return -1, err + } + } + + target, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", fd)) + if err != nil { + unix.Close(fd) + return -1, err + } + + // Add an additional check to make sure the opened fd is inside the rootfs + if !strings.HasPrefix(target, targetRoot) { + unix.Close(fd) + return -1, fmt.Errorf("error while resolving %q. It resolves outside the root directory", name) + } + + return fd, err +} + +func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + how := unix.OpenHow{ + Flags: flags, + Mode: uint64(mode & 07777), + Resolve: unix.RESOLVE_IN_ROOT, + } + return unix.Openat2(dirfd, name, &how) +} + +// skipOpenat2 is set when openat2 is not supported by the underlying kernel and avoid +// using it again. +var skipOpenat2 int32 + +// openFileUnderRootRaw tries to open a file using openat2 and if it is not supported fallbacks to a +// userspace lookup. +func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + var fd int + var err error + if atomic.LoadInt32(&skipOpenat2) > 0 { + fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) + } else { + fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode) + // If the function failed with ENOSYS, switch off the support for openat2 + // and fallback to using safejoin. + if err != nil && errors.Is(err, unix.ENOSYS) { + atomic.StoreInt32(&skipOpenat2, 1) + fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) + } + } + return fd, err +} + +// openFileUnderRoot safely opens a file under the specified root directory using openat2 +// name is the path to open relative to dirfd. 
+// dirfd is an open file descriptor to the target checkout directory. +// flags are the flags to pass to the open syscall. +// mode specifies the mode to use for newly created files. +func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + hasCreate := (flags & unix.O_CREAT) != 0 + if errors.Is(err, unix.ENOENT) && hasCreate { + parent := filepath.Dir(name) + if parent != "" { + newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0) + if err2 == nil { + defer newDirfd.Close() + fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + } + } + } + return nil, fmt.Errorf("open %q under the rootfs: %w", name, err) +} + +// openOrCreateDirUnderRoot safely opens a directory or create it if it is missing. +// name is the path to open relative to dirfd. +// dirfd is an open file descriptor to the target checkout directory. +// mode specifies the mode to use for newly created files. +func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + if errors.Is(err, unix.ENOENT) { + parent := filepath.Dir(name) + if parent != "" { + pDir, err2 := openOrCreateDirUnderRoot(parent, dirfd, mode) + if err2 != nil { + return nil, err + } + defer pDir.Close() + + baseName := filepath.Base(name) + + if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0755); err2 != nil { + return nil, err + } + + fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + } + } + return nil, err +} + +func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) { + switch { + case partCompression == fileTypeHole: + // The entire part is a hole. Do not need to read from a file. + c.rawReader = nil + return fileTypeHole, nil + case mf.Hole: + // Only the missing chunk in the requested part refers to a hole. + // The received data must be discarded. 
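+		// Drain exactly CompressedSize bytes so the stream stays aligned with
+		// the next chunk of this part; the hole itself is materialized later
+		// through appendHole rather than from the received payload.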
+ limitReader := io.LimitReader(from, mf.CompressedSize) + _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer) + return fileTypeHole, err + case partCompression == fileTypeZstdChunked: + c.rawReader = io.LimitReader(from, mf.CompressedSize) + if c.zstdReader == nil { + r, err := zstd.NewReader(c.rawReader) + if err != nil { + return partCompression, err + } + c.zstdReader = r + } else { + if err := c.zstdReader.Reset(c.rawReader); err != nil { + return partCompression, err + } + } + case partCompression == fileTypeEstargz: + c.rawReader = io.LimitReader(from, mf.CompressedSize) + if c.gzipReader == nil { + r, err := pgzip.NewReader(c.rawReader) + if err != nil { + return partCompression, err + } + c.gzipReader = r + } else { + if err := c.gzipReader.Reset(c.rawReader); err != nil { + return partCompression, err + } + } + case partCompression == fileTypeNoCompression: + c.rawReader = io.LimitReader(from, mf.UncompressedSize) + default: + return partCompression, fmt.Errorf("unknown file type %q", c.fileType) + } + return partCompression, nil +} + +// hashHole writes SIZE zeros to the specified hasher +func hashHole(h hash.Hash, size int64, copyBuffer []byte) error { + count := int64(len(copyBuffer)) + if size < count { + count = size + } + for i := int64(0); i < count; i++ { + copyBuffer[i] = 0 + } + for size > 0 { + count = int64(len(copyBuffer)) + if size < count { + count = size + } + if _, err := h.Write(copyBuffer[:count]); err != nil { + return err + } + size -= count + } + return nil +} + +// appendHole creates a hole with the specified size at the open fd. +func appendHole(fd int, size int64) error { + off, err := unix.Seek(fd, size, unix.SEEK_CUR) + if err != nil { + return err + } + // Make sure the file size is changed. It might be the last hole and no other data written afterwards. 
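+	// Seeking past the end of the file does not extend it by itself, so the
+	// explicit ftruncate below is what actually records the sparse region in
+	// the inode.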
+ if err := unix.Ftruncate(fd, off); err != nil { + return err + } + return nil +} + +func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error { + switch compression { + case fileTypeZstdChunked: + defer c.zstdReader.Reset(nil) + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeEstargz: + defer c.gzipReader.Close() + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.gzipReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeNoCompression: + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.rawReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeHole: + if err := appendHole(int(destFile.file.Fd()), size); err != nil { + return err + } + if err := hashHole(destFile.hash, size, c.copyBuffer); err != nil { + return err + } + default: + return fmt.Errorf("unknown file type %q", c.fileType) + } + return nil +} + +type destinationFile struct { + dirfd int + file *os.File + digester digest.Digester + hash hash.Hash + to io.Writer + metadata *internal.FileMetadata + options *archive.TarOptions +} + +func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions) (*destinationFile, error) { + file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) + if err != nil { + return nil, err + } + + digester := digest.Canonical.Digester() + hash := digester.Hash() + to := io.MultiWriter(file, hash) + + return &destinationFile{ + file: file, + digester: digester, + hash: hash, + to: to, + metadata: metadata, + options: options, + dirfd: dirfd, + }, nil +} + +func (d *destinationFile) Close() error { + manifestChecksum, err := digest.Parse(d.metadata.Digest) + if err != nil { + return err + } + if d.digester.Digest() != manifestChecksum { + return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum) + } + + return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false) +} + +func closeDestinationFiles(files chan *destinationFile, errors chan error) { + for f := range files { + errors <- f.Close() + } + close(errors) +} + +func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) { + var destFile *destinationFile + + filesToClose := make(chan *destinationFile, 3) + closeFilesErrors := make(chan error, 2) + + go closeDestinationFiles(filesToClose, closeFilesErrors) + defer func() { + close(filesToClose) + for e := range closeFilesErrors { + if e != nil && Err == nil { + Err = e + } + } + }() + + for _, missingPart := range missingParts { + var part io.ReadCloser + partCompression := c.fileType + switch { + case missingPart.Hole: + partCompression = fileTypeHole + case missingPart.OriginFile != nil: + var err error + part, err = missingPart.OriginFile.OpenFile() + if err != nil { + return err + } + partCompression = fileTypeNoCompression + case missingPart.SourceChunk != nil: + select { + case p := <-streams: + part = p + case err := <-errs: + return err + } + if part == nil { + return errors.Errorf("invalid stream returned") + } + default: + return errors.Errorf("internal error: missing part misses both local and remote data stream") + } + + for _, mf := range missingPart.Chunks { + if mf.Gap > 0 { + limitReader := io.LimitReader(part, 
mf.Gap) + _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer) + if err != nil { + Err = err + goto exit + } + continue + } + + if mf.File.Name == "" { + Err = errors.Errorf("file name empty") + goto exit + } + + compression, err := c.prepareCompressedStreamToFile(partCompression, part, &mf) + if err != nil { + Err = err + goto exit + } + + // Open the new file if it is different that what is already + // opened + if destFile == nil || destFile.metadata.Name != mf.File.Name { + var err error + if destFile != nil { + cleanup: + for { + select { + case err = <-closeFilesErrors: + if err != nil { + Err = err + goto exit + } + default: + break cleanup + } + } + filesToClose <- destFile + } + destFile, err = openDestinationFile(dirfd, mf.File, options) + if err != nil { + Err = err + goto exit + } + } + + if err := c.appendCompressedStreamToFile(compression, destFile, mf.UncompressedSize); err != nil { + Err = err + goto exit + } + if c.rawReader != nil { + if _, err := io.CopyBuffer(ioutil.Discard, c.rawReader, c.copyBuffer); err != nil { + Err = err + goto exit + } + } + } + exit: + if part != nil { + part.Close() + if Err != nil { + break + } + } + } + + if destFile != nil { + return destFile.Close() + } + + return nil +} + +func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { + getGap := func(missingParts []missingPart, i int) int { + prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length + return int(missingParts[i].SourceChunk.Offset - prev) + } + getCost := func(missingParts []missingPart, i int) int { + cost := getGap(missingParts, i) + if missingParts[i-1].OriginFile != nil { + cost += int(missingParts[i-1].SourceChunk.Length) + } + if missingParts[i].OriginFile != nil { + cost += int(missingParts[i].SourceChunk.Length) + } + return cost + } + + // simple case: merge chunks from the same file. + newMissingParts := missingParts[0:1] + prevIndex := 0 + for i := 1; i < len(missingParts); i++ { + gap := getGap(missingParts, i) + if gap == 0 && missingParts[prevIndex].OriginFile == nil && + missingParts[i].OriginFile == nil && + !missingParts[prevIndex].Hole && !missingParts[i].Hole && + len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 && + missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name { + missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize + missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize + } else { + newMissingParts = append(newMissingParts, missingParts[i]) + prevIndex++ + } + } + missingParts = newMissingParts + + if len(missingParts) <= target { + return missingParts + } + + // this implementation doesn't account for duplicates, so it could merge + // more than necessary to reach the specified target. Since target itself + // is a heuristic value, it doesn't matter. 
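+	// The remaining merges are picked greedily by cost: joining two adjacent
+	// parts costs the gap of bytes that would be fetched and discarded, plus
+	// the length of any part that an origin file could have served locally
+	// instead. The costs are sorted and the one at index
+	// len(missingParts)-target (clamped) becomes the threshold; every join at
+	// or below it is applied.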
+ costs := make([]int, len(missingParts)-1) + for i := 1; i < len(missingParts); i++ { + costs[i-1] = getCost(missingParts, i) + } + sort.Ints(costs) + + toShrink := len(missingParts) - target + if toShrink >= len(costs) { + toShrink = len(costs) - 1 + } + targetValue := costs[toShrink] + + newMissingParts = missingParts[0:1] + for i := 1; i < len(missingParts); i++ { + if getCost(missingParts, i) > targetValue { + newMissingParts = append(newMissingParts, missingParts[i]) + } else { + gap := getGap(missingParts, i) + prev := &newMissingParts[len(newMissingParts)-1] + prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + prev.Hole = false + prev.OriginFile = nil + if gap > 0 { + gapFile := missingFileChunk{ + Gap: int64(gap), + } + prev.Chunks = append(prev.Chunks, gapFile) + } + prev.Chunks = append(prev.Chunks, missingParts[i].Chunks...) + } + } + return newMissingParts +} + +func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { + var chunksToRequest []ImageSourceChunk + for _, c := range missingParts { + if c.OriginFile == nil && !c.Hole { + chunksToRequest = append(chunksToRequest, *c.SourceChunk) + } + } + + // There are some missing files. Prepare a multirange request for the missing chunks. + var streams chan io.ReadCloser + var err error + var errs chan error + for { + streams, errs, err = c.stream.GetBlobAt(chunksToRequest) + if err == nil { + break + } + + if _, ok := err.(ErrBadRequest); ok { + requested := len(missingParts) + // If the server cannot handle at least 64 chunks in a single request, just give up. + if requested < 64 { + return err + } + + // Merge more chunks to request + missingParts = mergeMissingChunks(missingParts, requested/2) + continue + } + return err + } + + if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil { + return err + } + return nil +} + +func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.FileMetadata, options *archive.TarOptions) error { + parent := filepath.Dir(name) + base := filepath.Base(name) + + parentFd := dirfd + if parent != "." { + parentFile, err := openOrCreateDirUnderRoot(parent, dirfd, 0) + if err != nil { + return err + } + defer parentFile.Close() + parentFd = int(parentFile.Fd()) + } + + if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil { + if !os.IsExist(err) { + return fmt.Errorf("mkdir %q: %w", name, err) + } + } + + file, err := openFileUnderRoot(base, parentFd, unix.O_DIRECTORY|unix.O_RDONLY, 0) + if err != nil { + return err + } + defer file.Close() + + return setFileAttrs(dirfd, file, mode, metadata, options, false) +} + +func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { + sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + defer sourceFile.Close() + + destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) + destDirFd := dirfd + if destDir != "." 
{ + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) + if err != nil { + return err + } + defer f.Close() + destDirFd = int(f.Fd()) + } + + err = doHardLink(int(sourceFile.Fd()), destDirFd, destBase) + if err != nil { + return fmt.Errorf("create hardlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) + } + + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY|unix.O_NOFOLLOW, 0) + if err != nil { + // If the target is a symlink, open the file with O_PATH. + if errors.Is(err, unix.ELOOP) { + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_PATH|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + defer newFile.Close() + + return setFileAttrs(dirfd, newFile, mode, metadata, options, true) + } + return err + } + defer newFile.Close() + + return setFileAttrs(dirfd, newFile, mode, metadata, options, false) +} + +func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { + destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) + destDirFd := dirfd + if destDir != "." { + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) + if err != nil { + return err + } + defer f.Close() + destDirFd = int(f.Fd()) + } + + if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil { + return fmt.Errorf("create symlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) + } + return nil +} + +type whiteoutHandler struct { + Dirfd int + Root string +} + +func (d whiteoutHandler) Setxattr(path, name string, value []byte) error { + file, err := openOrCreateDirUnderRoot(path, d.Dirfd, 0) + if err != nil { + return err + } + defer file.Close() + + if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil { + return fmt.Errorf("set xattr %s=%q for %q: %w", name, value, path, err) + } + return nil +} + +func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { + dir := filepath.Dir(path) + base := filepath.Base(path) + + dirfd := d.Dirfd + if dir != "" { + dir, err := openOrCreateDirUnderRoot(dir, d.Dirfd, 0) + if err != nil { + return err + } + defer dir.Close() + + dirfd = int(dir.Fd()) + } + + if err := unix.Mknodat(dirfd, base, mode, dev); err != nil { + return fmt.Errorf("mknod %q: %w", path, err) + } + + return nil +} + +func checkChownErr(err error, name string, uid, gid int) error { + if errors.Is(err, syscall.EINVAL) { + return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err) + } + return err +} + +func (d whiteoutHandler) Chown(path string, uid, gid int) error { + file, err := openFileUnderRoot(path, d.Dirfd, unix.O_PATH, 0) + if err != nil { + return err + } + defer file.Close() + + if err := unix.Fchownat(int(file.Fd()), "", uid, gid, unix.AT_EMPTY_PATH); err != nil { + var stat unix.Stat_t + if unix.Fstat(int(file.Fd()), &stat) == nil { + if stat.Uid == uint32(uid) && stat.Gid == uint32(gid) { + return nil + } + } + return checkChownErr(err, path, uid, gid) + } + return nil +} + +type hardLinkToCreate struct { + dest string + dirfd int + mode os.FileMode + metadata *internal.FileMetadata +} + +func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bool) bool { + if value, ok := storeOpts.PullOptions[name]; ok { + return strings.ToLower(value) == "true" + } + return def +} + +type findAndCopyFileOptions struct { + useHardLinks 
bool + enableHostDedup bool + ostreeRepos []string + options *archive.TarOptions +} + +func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { + finalizeFile := func(dstFile *os.File) error { + if dstFile != nil { + defer dstFile.Close() + if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil { + return err + } + } + return nil + } + + found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + found, dstFile, _, err = findFileInOSTreeRepos(r, copyOptions.ostreeRepos, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + if copyOptions.enableHostDedup { + found, dstFile, _, err = findFileOnTheHost(r, dirfd, copyOptions.useHardLinks, c.copyBuffer) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + } + return false, nil +} + +func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) { + defer c.layersCache.release() + defer func() { + if c.zstdReader != nil { + c.zstdReader.Close() + } + }() + + bigData := map[string][]byte{ + bigDataKey: c.manifest, + } + output := graphdriver.DriverWithDifferOutput{ + Differ: c, + BigData: bigData, + } + + storeOpts, err := types.DefaultStoreOptionsAutoDetectUID() + if err != nil { + return output, err + } + + if !parseBooleanPullOption(&storeOpts, "enable_partial_images", false) { + return output, errors.New("enable_partial_images not configured") + } + + enableHostDedup := parseBooleanPullOption(&storeOpts, "enable_host_deduplication", false) + + // When the hard links deduplication is used, file attributes are ignored because setting them + // modifies the source file as well. + useHardLinks := parseBooleanPullOption(&storeOpts, "use_hard_links", false) + + // List of OSTree repositories to use for deduplication + ostreeRepos := strings.Split(storeOpts.PullOptions["ostree_repos"], ":") + + // Generate the manifest + toc, err := unmarshalToc(c.manifest) + if err != nil { + return output, err + } + + whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + + var missingParts []missingPart + + mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries) + if err != nil { + return output, err + } + if err := maybeDoIDRemap(mergedEntries, options); err != nil { + return output, err + } + + if options.ForceMask != nil { + uid, gid, mode, err := archive.GetFileOwner(dest) + if err == nil { + value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) + if err := unix.Setxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil { + return output, err + } + } + } + + dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0) + if err != nil { + return output, fmt.Errorf("cannot open %q: %w", dest, err) + } + defer unix.Close(dirfd) + + // hardlinks can point to missing files. 
So create them after all files + // are retrieved + var hardLinks []hardLinkToCreate + + missingPartsSize, totalChunksSize := int64(0), int64(0) + + copyOptions := findAndCopyFileOptions{ + useHardLinks: useHardLinks, + enableHostDedup: enableHostDedup, + ostreeRepos: ostreeRepos, + options: options, + } + + type copyFileJob struct { + njob int + index int + mode os.FileMode + metadata *internal.FileMetadata + + found bool + err error + } + + var wg sync.WaitGroup + + copyResults := make([]copyFileJob, len(mergedEntries)) + + copyFileJobs := make(chan copyFileJob) + defer func() { + if copyFileJobs != nil { + close(copyFileJobs) + } + wg.Wait() + }() + + for i := 0; i < copyGoRoutines; i++ { + wg.Add(1) + jobs := copyFileJobs + + go func() { + defer wg.Done() + for job := range jobs { + found, err := c.findAndCopyFile(dirfd, job.metadata, ©Options, job.mode) + job.err = err + job.found = found + copyResults[job.njob] = job + } + }() + } + + filesToWaitFor := 0 + for i, r := range mergedEntries { + if options.ForceMask != nil { + value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777) + r.Xattrs[containersOverrideXattr] = base64.StdEncoding.EncodeToString([]byte(value)) + r.Mode = int64(*options.ForceMask) + } + + mode := os.FileMode(r.Mode) + + r.Name = filepath.Clean(r.Name) + r.Linkname = filepath.Clean(r.Linkname) + + t, err := typeToTarType(r.Type) + if err != nil { + return output, err + } + if whiteoutConverter != nil { + hdr := archivetar.Header{ + Typeflag: t, + Name: r.Name, + Linkname: r.Linkname, + Size: r.Size, + Mode: r.Mode, + Uid: r.UID, + Gid: r.GID, + } + handler := whiteoutHandler{ + Dirfd: dirfd, + Root: dest, + } + writeFile, err := whiteoutConverter.ConvertReadWithHandler(&hdr, r.Name, &handler) + if err != nil { + return output, err + } + if !writeFile { + continue + } + } + switch t { + case tar.TypeReg: + // Create directly empty files. + if r.Size == 0 { + // Used to have a scope for cleanup. + createEmptyFile := func() error { + file, err := openFileUnderRoot(r.Name, dirfd, newFileFlags, 0) + if err != nil { + return err + } + defer file.Close() + if err := setFileAttrs(dirfd, file, mode, &r, options, false); err != nil { + return err + } + return nil + } + if err := createEmptyFile(); err != nil { + return output, err + } + continue + } + + case tar.TypeDir: + if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil { + return output, err + } + continue + + case tar.TypeLink: + dest := dest + dirfd := dirfd + mode := mode + r := r + hardLinks = append(hardLinks, hardLinkToCreate{ + dest: dest, + dirfd: dirfd, + mode: mode, + metadata: &r, + }) + continue + + case tar.TypeSymlink: + if err := safeSymlink(dirfd, mode, &r, options); err != nil { + return output, err + } + continue + + case tar.TypeChar: + case tar.TypeBlock: + case tar.TypeFifo: + /* Ignore. */ + default: + return output, fmt.Errorf("invalid type %q", t) + } + + totalChunksSize += r.Size + + if t == tar.TypeReg { + index := i + njob := filesToWaitFor + job := copyFileJob{ + mode: mode, + metadata: &mergedEntries[index], + index: index, + njob: njob, + } + copyFileJobs <- job + filesToWaitFor++ + } + } + + close(copyFileJobs) + copyFileJobs = nil + + wg.Wait() + + for _, res := range copyResults[:filesToWaitFor] { + if res.err != nil { + return output, res.err + } + // the file was already copied to its destination + // so nothing left to do. 
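+		// Everything else becomes a missingPart below: data chunks are first
+		// checked against chunks already present in other layers, and all-zero
+		// chunks are turned into holes that are never fetched at all.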
+ if res.found { + continue + } + + r := &mergedEntries[res.index] + + missingPartsSize += r.Size + + remainingSize := r.Size + + // the file is missing, attempt to find individual chunks. + for _, chunk := range r.Chunks { + compressedSize := int64(chunk.EndOffset - chunk.Offset) + size := remainingSize + if chunk.ChunkSize > 0 { + size = chunk.ChunkSize + } + remainingSize = remainingSize - size + + rawChunk := ImageSourceChunk{ + Offset: uint64(chunk.Offset), + Length: uint64(compressedSize), + } + file := missingFileChunk{ + File: &mergedEntries[res.index], + CompressedSize: compressedSize, + UncompressedSize: size, + } + mp := missingPart{ + SourceChunk: &rawChunk, + Chunks: []missingFileChunk{ + file, + }, + } + + switch chunk.ChunkType { + case internal.ChunkTypeData: + root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk) + if err != nil { + return output, err + } + if offset >= 0 && validateChunkChecksum(chunk, root, path, offset, c.copyBuffer) { + missingPartsSize -= size + mp.OriginFile = &originFile{ + Root: root, + Path: path, + Offset: offset, + } + } + case internal.ChunkTypeZeros: + missingPartsSize -= size + mp.Hole = true + // Mark all chunks belonging to the missing part as holes + for i := range mp.Chunks { + mp.Chunks[i].Hole = true + } + } + missingParts = append(missingParts, mp) + } + } + // There are some missing files. Prepare a multirange request for the missing chunks. + if len(missingParts) > 0 { + missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks) + if err := c.retrieveMissingFiles(dest, dirfd, missingParts, options); err != nil { + return output, err + } + } + + for _, m := range hardLinks { + if err := safeLink(m.dirfd, m.mode, m.metadata, options); err != nil { + return output, err + } + } + + if totalChunksSize > 0 { + logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize)) + } + return output, nil +} + +func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { + // ignore the metadata files for the estargz format. + if fileType != fileTypeEstargz { + return false + } + switch e.Name { + // ignore the metadata files for the estargz format. 
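+	// (the prefetch landmarks and the TOC tar entry are estargz bookkeeping
+	// entries, not files that should be created in the layer)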
+ case estargz.PrefetchLandmark, estargz.NoPrefetchLandmark, estargz.TOCTarName: + return true + } + return false +} + +func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, error) { + countNextChunks := func(start int) int { + count := 0 + for _, e := range entries[start:] { + if e.Type != TypeChunk { + return count + } + count++ + } + return count + } + + size := 0 + for _, entry := range entries { + if mustSkipFile(fileType, entry) { + continue + } + if entry.Type != TypeChunk { + size++ + } + } + + mergedEntries := make([]internal.FileMetadata, size) + m := 0 + for i := 0; i < len(entries); i++ { + e := entries[i] + if mustSkipFile(fileType, e) { + continue + } + if e.Type == TypeChunk { + return nil, fmt.Errorf("chunk type without a regular file") + } + + if e.Type == TypeReg { + nChunks := countNextChunks(i + 1) + + e.Chunks = make([]*internal.FileMetadata, nChunks+1) + for j := 0; j <= nChunks; j++ { + e.Chunks[j] = &entries[i+j] + e.EndOffset = entries[i+j].EndOffset + } + i += nChunks + } + mergedEntries[m] = e + m++ + } + // stargz/estargz doesn't store EndOffset so let's calculate it here + lastOffset := c.tocOffset + for i := len(mergedEntries) - 1; i >= 0; i-- { + if mergedEntries[i].EndOffset == 0 { + mergedEntries[i].EndOffset = lastOffset + } + if mergedEntries[i].Offset != 0 { + lastOffset = mergedEntries[i].Offset + } + + lastChunkOffset := mergedEntries[i].EndOffset + for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- { + mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset + mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset + lastChunkOffset = mergedEntries[i].Chunks[j].Offset + } + } + return mergedEntries, nil +} + +// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the +// same digest as chunk.ChunkDigest +func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool { + parentDirfd, err := unix.Open(root, unix.O_PATH, 0) + if err != nil { + return false + } + defer unix.Close(parentDirfd) + + fd, err := openFileUnderRoot(path, parentDirfd, unix.O_RDONLY, 0) + if err != nil { + return false + } + defer fd.Close() + + if _, err := unix.Seek(int(fd.Fd()), offset, 0); err != nil { + return false + } + + r := io.LimitReader(fd, chunk.ChunkSize) + digester := digest.Canonical.Digester() + + if _, err := io.CopyBuffer(digester.Hash(), r, copyBuffer); err != nil { + return false + } + + digest, err := digest.Parse(chunk.ChunkDigest) + if err != nil { + return false + } + + return digester.Digest() == digest +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go new file mode 100644 index 00000000000..3a406ba786a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package chunked + +import ( + "context" + + storage "github.com/containers/storage" + graphdriver "github.com/containers/storage/drivers" + "github.com/pkg/errors" +) + +// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. 
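+// This stub is compiled everywhere except Linux: partial pulls are a
+// Linux-only feature, so it always returns an error and callers are expected
+// to fall back to a regular full-layer pull.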
+func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
+	return nil, errors.New("format not supported on this architecture")
+}
diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go
new file mode 100644
index 00000000000..f6e0cfcfe86
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/config/config.go
@@ -0,0 +1,337 @@
+package config
+
+import (
+	"fmt"
+	"os"
+)
+
+// ThinpoolOptionsConfig represents the "storage.options.thinpool"
+// TOML config table.
+type ThinpoolOptionsConfig struct {
+	// AutoExtendPercent determines the amount by which the pool needs to be
+	// grown. This is specified in terms of % of pool size. So a value of
+	// 20 means that when the threshold is hit, the pool will be grown by
+	// 20% of the existing pool size.
+	AutoExtendPercent string `toml:"autoextend_percent,omitempty"`
+
+	// AutoExtendThreshold determines the pool extension threshold in terms
+	// of percentage of pool size. For example, if the threshold is 60, that
+	// means when the pool is 60% full, the threshold has been hit.
+	AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"`
+
+	// BaseSize specifies the size to use when creating the base device,
+	// which limits the size of images and containers.
+	BaseSize string `toml:"basesize,omitempty"`
+
+	// BlockSize specifies a custom blocksize to use for the thin pool.
+	BlockSize string `toml:"blocksize,omitempty"`
+
+	// DirectLvmDevice specifies a custom block storage device to use for
+	// the thin pool.
+	DirectLvmDevice string `toml:"directlvm_device,omitempty"`
+
+	// DirectLvmDeviceForce wipes the device even if it already has a
+	// filesystem.
+	DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"`
+
+	// Fs specifies the filesystem type to use for the base device.
+	Fs string `toml:"fs,omitempty"`
+
+	// LogLevel sets the log level of devicemapper.
+	LogLevel string `toml:"log_level,omitempty"`
+
+	// MetadataSize specifies the size of the metadata for the thinpool.
+	// It will be used with the `pvcreate --metadata` option.
+	MetadataSize string `toml:"metadatasize,omitempty"`
+
+	// MinFreeSpace specifies the minimum free space percent in a thin pool
+	// required for new device creation to succeed.
+	MinFreeSpace string `toml:"min_free_space,omitempty"`
+
+	// MkfsArg specifies extra mkfs arguments to be used when creating the
+	// base device.
+	MkfsArg string `toml:"mkfsarg,omitempty"`
+
+	// MountOpt specifies extra mount options used when mounting the thin
+	// devices.
+	MountOpt string `toml:"mountopt,omitempty"`
+
+	// Size
+	Size string `toml:"size,omitempty"`
+
+	// UseDeferredDeletion marks the device for deferred deletion.
+	UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"`
+
+	// UseDeferredRemoval marks the device for deferred removal.
+	UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"`
+
+	// XfsNoSpaceMaxRetries specifies the maximum number of
+	// retries XFS should attempt to complete IO when ENOSPC (no space)
+	// error is returned by the underlying storage device.
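+	//
+	// In storage.conf this corresponds to the xfs_nospace_max_retries key of
+	// the [storage.options.thinpool] table, e.g. (illustrative value):
+	//
+	//	[storage.options.thinpool]
+	//	xfs_nospace_max_retries = "0"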
+	XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"`
+}
+
+type AufsOptionsConfig struct {
+	// MountOpt specifies extra mount options used when mounting
+	MountOpt string `toml:"mountopt,omitempty"`
+}
+
+type BtrfsOptionsConfig struct {
+	// MinSpace is the minimal space allocated to the device
+	MinSpace string `toml:"min_space,omitempty"`
+	// Size
+	Size string `toml:"size,omitempty"`
+}
+
+type OverlayOptionsConfig struct {
+	// IgnoreChownErrors is a flag for whether chown errors should be
+	// ignored when building an image.
+	IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
+	// MountOpt specifies extra mount options used when mounting
+	MountOpt string `toml:"mountopt,omitempty"`
+	// MountProgram is an alternative program to use for the mount of the file system
+	MountProgram string `toml:"mount_program,omitempty"`
+	// Size
+	Size string `toml:"size,omitempty"`
+	// Inodes is used to set a maximum number of inodes for the container image.
+	Inodes string `toml:"inodes,omitempty"`
+	// SkipMountHome tells the driver to not create a bind mount on the storage home
+	SkipMountHome string `toml:"skip_mount_home,omitempty"`
+	// ForceMask indicates the permissions mask (e.g. "0755") to use for new
+	// files and directories
+	ForceMask string `toml:"force_mask,omitempty"`
+}
+
+type VfsOptionsConfig struct {
+	// IgnoreChownErrors is a flag for whether chown errors should be
+	// ignored when building an image.
+	IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
+}
+
+type ZfsOptionsConfig struct {
+	// MountOpt specifies extra mount options used when mounting
+	MountOpt string `toml:"mountopt,omitempty"`
+	// Name is the file system name of the ZFS file system
+	Name string `toml:"fsname,omitempty"`
+	// Size
+	Size string `toml:"size,omitempty"`
+}
+
+// OptionsConfig represents the "storage.options" TOML config table.
+type OptionsConfig struct {
+	// AdditionalImageStores is the location of additional read-only
+	// image stores. Usually used to access networked file systems
+	// for shared image content.
+	AdditionalImageStores []string `toml:"additionalimagestores,omitempty"`
+
+	// AdditionalLayerStores is the location of additional read-only
+	// layer stores. Usually used to access networked file systems
+	// for shared image content.
+	// This API is experimental and can be changed without bumping the
+	// major version number.
+	AdditionalLayerStores []string `toml:"additionallayerstores,omitempty"`
+
+	// Size
+	Size string `toml:"size,omitempty"`
+
+	// RemapUIDs is a list of default UID mappings to use for layers.
+	RemapUIDs string `toml:"remap-uids,omitempty"`
+	// RemapGIDs is a list of default GID mappings to use for layers.
+	RemapGIDs string `toml:"remap-gids,omitempty"`
+	// IgnoreChownErrors is a flag for whether chown errors should be
+	// ignored when building an image.
+	IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`
+
+	// ForceMask indicates the permissions mask (e.g. "0755") to use for new
+	// files and directories.
+	ForceMask os.FileMode `toml:"force_mask,omitempty"`
+
+	// RemapUser is the name of one or more entries in /etc/subuid which
+	// should be used to set up default UID mappings.
+	RemapUser string `toml:"remap-user,omitempty"`
+	// RemapGroup is the name of one or more entries in /etc/subgid which
+	// should be used to set up default GID mappings.
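+	//
+	// A matching /etc/subgid entry uses the usual name:start:count form,
+	// e.g. (illustrative):
+	//
+	//	containers:100000:65536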
+	RemapGroup string `toml:"remap-group,omitempty"`
+
+	// RootAutoUsernsUser is the name of one or more entries in /etc/subuid and
+	// /etc/subgid which should be used to automatically set up a user namespace.
+	RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"`
+
+	// AutoUsernsMinSize is the minimum size for a user namespace that is
+	// created automatically.
+	AutoUsernsMinSize uint32 `toml:"auto-userns-min-size,omitempty"`
+
+	// AutoUsernsMaxSize is the maximum size for a user namespace that is
+	// created automatically.
+	AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size,omitempty"`
+
+	// Aufs container options to be handed to aufs drivers
+	Aufs struct{ AufsOptionsConfig } `toml:"aufs,omitempty"`
+
+	// Btrfs container options to be handed to btrfs drivers
+	Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"`
+
+	// Thinpool container options to be handed to thinpool drivers
+	Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"`
+
+	// Overlay container options to be handed to overlay drivers
+	Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"`
+
+	// Vfs container options to be handed to VFS drivers
+	Vfs struct{ VfsOptionsConfig } `toml:"vfs,omitempty"`
+
+	// Zfs container options to be handed to ZFS drivers
+	Zfs struct{ ZfsOptionsConfig } `toml:"zfs,omitempty"`
+
+	// SkipMountHome tells the driver to not create a bind mount on the storage home
+	SkipMountHome string `toml:"skip_mount_home,omitempty"`
+
+	// MountProgram is an alternative program to use for the mount of the file system
+	MountProgram string `toml:"mount_program,omitempty"`
+
+	// MountOpt specifies extra mount options used when mounting
+	MountOpt string `toml:"mountopt,omitempty"`
+
+	// PullOptions specifies options to be handed to pull managers.
+	// This API is experimental and can be changed without bumping the major version number.
+	PullOptions map[string]string `toml:"pull_options,omitempty"`
+
+	// DisableVolatile doesn't allow volatile mounts when it is set.
+ DisableVolatile bool `toml:"disable-volatile,omitempty"` +} + +// GetGraphDriverOptions returns the driver specific options +func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { + var doptions []string + switch driverName { + case "aufs": + if options.Aufs.MountOpt != "" { + return append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Aufs.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + + case "btrfs": + if options.Btrfs.MinSpace != "" { + return append(doptions, fmt.Sprintf("%s.min_space=%s", driverName, options.Btrfs.MinSpace)) + } + if options.Btrfs.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Btrfs.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + + case "devicemapper": + if options.Thinpool.AutoExtendPercent != "" { + doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", options.Thinpool.AutoExtendPercent)) + } + if options.Thinpool.AutoExtendThreshold != "" { + doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", options.Thinpool.AutoExtendThreshold)) + } + if options.Thinpool.BaseSize != "" { + doptions = append(doptions, fmt.Sprintf("dm.basesize=%s", options.Thinpool.BaseSize)) + } + if options.Thinpool.BlockSize != "" { + doptions = append(doptions, fmt.Sprintf("dm.blocksize=%s", options.Thinpool.BlockSize)) + } + if options.Thinpool.DirectLvmDevice != "" { + doptions = append(doptions, fmt.Sprintf("dm.directlvm_device=%s", options.Thinpool.DirectLvmDevice)) + } + if options.Thinpool.DirectLvmDeviceForce != "" { + doptions = append(doptions, fmt.Sprintf("dm.directlvm_device_force=%s", options.Thinpool.DirectLvmDeviceForce)) + } + if options.Thinpool.Fs != "" { + doptions = append(doptions, fmt.Sprintf("dm.fs=%s", options.Thinpool.Fs)) + } + if options.Thinpool.LogLevel != "" { + doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel)) + } + if options.Thinpool.MetadataSize != "" { + doptions = append(doptions, fmt.Sprintf("dm.metadata_size=%s", options.Thinpool.MetadataSize)) + } + if options.Thinpool.MinFreeSpace != "" { + doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace)) + } + if options.Thinpool.MkfsArg != "" { + doptions = append(doptions, fmt.Sprintf("dm.mkfsarg=%s", options.Thinpool.MkfsArg)) + } + if options.Thinpool.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Thinpool.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + + if options.Thinpool.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Thinpool.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + + if options.Thinpool.UseDeferredDeletion != "" { + doptions = append(doptions, fmt.Sprintf("dm.use_deferred_deletion=%s", options.Thinpool.UseDeferredDeletion)) + } + if options.Thinpool.UseDeferredRemoval != "" { + doptions = append(doptions, fmt.Sprintf("dm.use_deferred_removal=%s", options.Thinpool.UseDeferredRemoval)) + } + if options.Thinpool.XfsNoSpaceMaxRetries != "" { + doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries)) + 
} + + case "overlay", "overlay2": + if options.Overlay.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors)) + } else if options.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors)) + } + if options.Overlay.MountProgram != "" { + doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.Overlay.MountProgram)) + } else if options.MountProgram != "" { + doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.MountProgram)) + } + if options.Overlay.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Overlay.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + if options.Overlay.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Overlay.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + if options.Overlay.Inodes != "" { + doptions = append(doptions, fmt.Sprintf("%s.inodes=%s", driverName, options.Overlay.Inodes)) + } + if options.Overlay.SkipMountHome != "" { + doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.Overlay.SkipMountHome)) + } else if options.SkipMountHome != "" { + doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.SkipMountHome)) + } + if options.Overlay.ForceMask != "" { + doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.Overlay.ForceMask)) + } else if options.ForceMask != 0 { + doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.ForceMask)) + } + case "vfs": + if options.Vfs.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors)) + } else if options.IgnoreChownErrors != "" { + doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors)) + } + + case "zfs": + if options.Zfs.Name != "" { + doptions = append(doptions, fmt.Sprintf("%s.fsname=%s", driverName, options.Zfs.Name)) + } + if options.Zfs.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Zfs.MountOpt)) + } else if options.MountOpt != "" { + doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) + } + if options.Zfs.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Zfs.Size)) + } else if options.Size != "" { + doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) + } + } + return doptions +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go new file mode 100644 index 00000000000..6a0ac246479 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go @@ -0,0 +1,821 @@ +// +build linux,cgo + +package devicemapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "unsafe" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// Same as DM_DEVICE_* enum values from libdevmapper.h +// nolint: deadcode +const ( + deviceCreate TaskType = iota + deviceReload + deviceRemove + deviceRemoveAll + deviceSuspend + deviceResume + 
deviceInfo
+	deviceDeps
+	deviceRename
+	deviceVersion
+	deviceStatus
+	deviceTable
+	deviceWaitevent
+	deviceList
+	deviceClear
+	deviceMknodes
+	deviceListVersions
+	deviceTargetMsg
+	deviceSetGeometry
+)
+
+const (
+	addNodeOnResume AddNodeType = iota
+	addNodeOnCreate
+)
+
+// List of errors returned when using devicemapper.
+var (
+	ErrTaskRun              = errors.New("dm_task_run failed")
+	ErrTaskSetName          = errors.New("dm_task_set_name failed")
+	ErrTaskSetMessage       = errors.New("dm_task_set_message failed")
+	ErrTaskSetAddNode       = errors.New("dm_task_set_add_node failed")
+	ErrTaskSetRo            = errors.New("dm_task_set_ro failed")
+	ErrTaskAddTarget        = errors.New("dm_task_add_target failed")
+	ErrTaskSetSector        = errors.New("dm_task_set_sector failed")
+	ErrTaskGetDeps          = errors.New("dm_task_get_deps failed")
+	ErrTaskGetInfo          = errors.New("dm_task_get_info failed")
+	ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed")
+	ErrTaskDeferredRemove   = errors.New("dm_task_deferred_remove failed")
+	ErrTaskSetCookie        = errors.New("dm_task_set_cookie failed")
+	ErrNilCookie            = errors.New("cookie ptr can't be nil")
+	ErrGetBlockSize         = errors.New("Can't get block size")
+	ErrUdevWait             = errors.New("wait on udev cookie failed")
+	ErrSetDevDir            = errors.New("dm_set_dev_dir failed")
+	ErrGetLibraryVersion    = errors.New("dm_get_library_version failed")
+	ErrCreateRemoveTask     = errors.New("Can't create task of type deviceRemove")
+	ErrRunRemoveDevice      = errors.New("running RemoveDevice failed")
+	ErrInvalidAddNode       = errors.New("Invalid AddNode type")
+	ErrBusy                 = errors.New("Device is Busy")
+	ErrDeviceIDExists       = errors.New("Device Id Exists")
+	ErrEnxio                = errors.New("No such device or address")
+)
+
+var (
+	dmSawBusy  bool
+	dmSawExist bool
+	dmSawEnxio bool // No such device or address
+)
+
+type (
+	// Task represents a devicemapper task (like lvcreate, etc.); a task is
+	// needed for each ioctl command to execute.
+	Task struct {
+		unmanaged *cdmTask
+	}
+	// Deps represents dependents (layers) of a device.
+	Deps struct {
+		Count  uint32
+		Filler uint32
+		Device []uint64
+	}
+	// Info represents information about a device.
+	Info struct {
+		Exists         int
+		Suspended      int
+		LiveTable      int
+		InactiveTable  int
+		OpenCount      int32
+		EventNr        uint32
+		Major          uint32
+		Minor          uint32
+		ReadOnly       int
+		TargetCount    int32
+		DeferredRemove int
+	}
+	// TaskType represents a type of task
+	TaskType int
+	// AddNodeType represents a type of node to be added
+	AddNodeType int
+)
+
+// DeviceIDExists returns whether the error conveys the information that the
+// device ID already exists. This will be true if a device-creation or
+// snapshot-creation operation fails because the device or snapshot device
+// already exists in the pool.
+// The current implementation is a little crude, as it scans the error string
+// for an exact pattern match; replacing it with a more robust implementation
+// is desirable.
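+//
+// A typical caller sketch (hypothetical names):
+//
+//	if err := CreateDevice(pool, id); DeviceIDExists(err) {
+//		id++ // pick another device ID and retry
+//	}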
+func DeviceIDExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) +} + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) + } + if err := task.setName(name); err != nil { + return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + runtime.KeepAlive(t) + return nil +} + +func (t *Task) setName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) setMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) setSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) setCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) setAddNode(addNode AddNodeType) error { + if addNode != addNodeOnResume && addNode != addNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) setRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) addTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) getDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + +func (t *Task) getInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getInfoWithDeferred() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. 
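+// It is normally paired with a preceding task.setCookie call, e.g.:
+//
+//	cookie := new(uint)
+//	if err := task.setCookie(cookie, 0); err != nil {
+//		return err
+//	}
+//	defer UdevWait(cookie)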
+func UdevWait(cookie *uint) error {
+	if res := DmUdevWait(*cookie); res != 1 {
+		logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res)
+		return ErrUdevWait
+	}
+	return nil
+}
+
+// SetDevDir sets the dev folder for the device mapper library (usually /dev).
+func SetDevDir(dir string) error {
+	if res := DmSetDevDir(dir); res != 1 {
+		logrus.Debug("devicemapper: Error dm_set_dev_dir")
+		return ErrSetDevDir
+	}
+	return nil
+}
+
+// GetLibraryVersion returns the device mapper library version.
+func GetLibraryVersion() (string, error) {
+	var version string
+	if res := DmGetLibraryVersion(&version); res != 1 {
+		return "", ErrGetLibraryVersion
+	}
+	return version, nil
+}
+
+// UdevSyncSupported returns whether device-mapper is able to sync with udev.
+//
+// This is essential, otherwise race conditions can arise where both udev and
+// device-mapper attempt to create and destroy devices.
+func UdevSyncSupported() bool {
+	return DmUdevGetSyncSupport() != 0
+}
+
+// UdevSetSyncSupport allows setting whether the udev sync should be enabled.
+// The returned bool indicates whether the sync is now enabled.
+func UdevSetSyncSupport(enable bool) bool {
+	if enable {
+		DmUdevSetSyncSupport(1)
+	} else {
+		DmUdevSetSyncSupport(0)
+	}
+
+	return UdevSyncSupported()
+}
+
+// CookieSupported returns whether the version of device-mapper supports the
+// use of cookies in the tasks.
+// This is largely a lower-level call that other functions use.
+func CookieSupported() bool {
+	return DmCookieSupported() != 0
+}
+
+// RemoveDevice is a useful helper for cleaning up a device.
+func RemoveDevice(name string) error {
+	task, err := TaskCreateNamed(deviceRemove, name)
+	if task == nil {
+		return err
+	}
+
+	cookie := new(uint)
+	if err := task.setCookie(cookie, 0); err != nil {
+		return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
+	}
+	defer UdevWait(cookie)
+
+	dmSawBusy = false // reset before the task is run
+	dmSawEnxio = false
+	if err = task.run(); err != nil {
+		if dmSawBusy {
+			return ErrBusy
+		}
+		if dmSawEnxio {
+			return ErrEnxio
+		}
+		return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err)
+	}
+
+	return nil
+}
+
+// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred.
+func RemoveDeviceDeferred(name string) error {
+	logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name)
+	defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name)
+	task, err := TaskCreateNamed(deviceRemove, name)
+	if task == nil {
+		return err
+	}
+
+	if err := DmTaskDeferredRemove(task.unmanaged); err != 1 {
+		return ErrTaskDeferredRemove
+	}
+
+	// Set a task cookie and disable library fallback, or else libdevmapper will
+	// disable udev dm rules and delete the symlink under /dev/mapper by itself,
+	// even if the removal is deferred by the kernel.
+	cookie := new(uint)
+	flags := uint16(DmUdevDisableLibraryFallback)
+	if err := task.setCookie(cookie, flags); err != nil {
+		return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
+	}
+
+	// libdevmapper and udev rely on System V semaphores for synchronization;
+	// semaphores created in `task.setCookie` will be cleaned up in `UdevWait`.
+	// So these two function calls must come in pairs, otherwise semaphores will
+	// be leaked, and the limit of the number of semaphores defined in
+	// `/proc/sys/kernel/sem` will be reached, which will eventually make all
+	// following calls to `task.setCookie` fail.
+	// This call will not wait for the deferred removal to actually finish,
+	// since no udev event will be generated and the semaphore's value will
+	// not be incremented by udev; here UdevWait just cleans up the semaphore.
+	defer UdevWait(cookie)
+
+	dmSawEnxio = false
+	if err = task.run(); err != nil {
+		if dmSawEnxio {
+			return ErrEnxio
+		}
+		return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
+	}
+
+	return nil
+}
+
+// CancelDeferredRemove cancels a deferred remove for a device.
+func CancelDeferredRemove(deviceName string) error {
+	task, err := TaskCreateNamed(deviceTargetMsg, deviceName)
+	if task == nil {
+		return err
+	}
+
+	if err := task.setSector(0); err != nil {
+		return fmt.Errorf("devicemapper: Can't set sector %s", err)
+	}
+
+	if err := task.setMessage("@cancel_deferred_remove"); err != nil {
+		return fmt.Errorf("devicemapper: Can't set message %s", err)
+	}
+
+	dmSawBusy = false
+	dmSawEnxio = false
+	if err := task.run(); err != nil {
+		// A device might already be being deleted
+		if dmSawBusy {
+			return ErrBusy
+		} else if dmSawEnxio {
+			return ErrEnxio
+		}
+		return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err)
+	}
+	return nil
+}
+
+// GetBlockDeviceSize returns the size of a block device identified by the specified file.
+func GetBlockDeviceSize(file *os.File) (uint64, error) {
+	size, err := ioctlBlkGetSize64(file.Fd())
+	if err != nil {
+		logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err)
+		return 0, ErrGetBlockSize
+	}
+	return uint64(size), nil
+}
+
+// BlockDeviceDiscard runs discard for the given path.
+// This is used as a workaround for the kernel not discarding blocks on the
+// thin pool when we remove a thinp device, so we do it manually.
+func BlockDeviceDiscard(path string) error {
+	file, err := os.OpenFile(path, os.O_RDWR, 0)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	size, err := GetBlockDeviceSize(file)
+	if err != nil {
+		return err
+	}
+
+	if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil {
+		return err
+	}
+
+	// Without this, the removal of the device that happens after the discard
+	// sometimes fails with EBUSY.
+	unix.Sync()
+
+	return nil
+}
+
+// CreatePool is the programmatic example of "dmsetup create".
+// It creates a device with the specified poolName, data and metadata file and block size.
+func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
+	task, err := TaskCreateNamed(deviceCreate, poolName)
+	if task == nil {
+		return err
+	}
+
+	size, err := GetBlockDeviceSize(dataFile)
+	if err != nil {
+		return fmt.Errorf("devicemapper: Can't get data size %s", err)
+	}
+
+	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
+	if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
+		return fmt.Errorf("devicemapper: Can't add target %s", err)
+	}
+
+	cookie := new(uint)
+	flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag)
+	if err := task.setCookie(cookie, flags); err != nil {
+		return fmt.Errorf("devicemapper: Can't set cookie %s", err)
+	}
+	defer UdevWait(cookie)
+
+	if err := task.run(); err != nil {
+		return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err)
+	}
+
+	return nil
+}
+
+// ReloadPool is the programmatic example of "dmsetup reload".
+// It reloads the table with the specified poolName, data and metadata file and block size. +func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running ReloadPool %s", err) + } + + return nil +} + +// GetDeps is the programmatic example of "dmsetup deps". +// It outputs a list of devices referenced by the live table for the specified device. +func GetDeps(name string) (*Deps, error) { + task, err := TaskCreateNamed(deviceDeps, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getDeps() +} + +// GetInfo is the programmatic example of "dmsetup info". +// It outputs some brief information about the device. +func GetInfo(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfo() +} + +// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. +// It outputs some brief information about the device. +func GetInfoWithDeferred(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfoWithDeferred() +} + +// GetDriverVersion is the programmatic example of "dmsetup version". +// It outputs version information of the driver. +func GetDriverVersion() (string, error) { + task := TaskCreate(deviceVersion) + if task == nil { + return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") + } + if err := task.run(); err != nil { + return "", err + } + return task.getDriverVersion() +} + +// GetStatus is the programmatic example of "dmsetup status". +// It outputs status information for the specified device name. +func GetStatus(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceStatus, name) + if task == nil { + logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// GetTable is the programmatic example for "dmsetup table". +// It outputs the current table for the specified device name. 
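+// A hypothetical call:
+//
+//	start, length, targetType, params, err := GetTable("pool")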
+func GetTable(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceTable, name) + if task == nil { + logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// SetTransactionID sets a transaction id for the specified device name. +func SetTransactionID(poolName string, oldID uint64, newID uint64) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) + } + return nil +} + +// SuspendDevice is the programmatic example of "dmsetup suspend". +// It suspends the specified device. +func SuspendDevice(name string) error { + task, err := TaskCreateNamed(deviceSuspend, name) + if task == nil { + return err + } + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) + } + return nil +} + +// ResumeDevice is the programmatic example of "dmsetup resume". +// It un-suspends the specified device. +func ResumeDevice(name string) error { + task, err := TaskCreateNamed(deviceResume, name) + if task == nil { + return err + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceResume %s", err) + } + + return nil +} + +// CreateDevice creates a device with the specified poolName with the specified device id. +func CreateDevice(poolName string, deviceID int) error { + logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + + return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) + + } + return nil +} + +// DeleteDevice deletes a device with the specified poolName with the specified device id. 
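+// A hypothetical call, removing thin device 42 from its pool:
+//
+//	if err := DeleteDevice("pool", 42); err == ErrBusy {
+//		// the device is still open somewhere; retry later
+//	}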
+func DeleteDevice(poolName string, deviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + if err := task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) + } + return nil +} + +// ActivateDevice activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { + return activateDevice(poolName, name, deviceID, size, "") +} + +// ActivateDeviceWithExternal activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { + return activateDevice(poolName, name, deviceID, size, external) +} + +func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { + task, err := TaskCreateNamed(deviceCreate, name) + if task == nil { + return err + } + + var params string + if len(external) > 0 { + params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) + } else { + params = fmt.Sprintf("%s %d", poolName, deviceID) + } + if err := task.addTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + if err := task.setAddNode(addNodeOnCreate); err != nil { + return fmt.Errorf("devicemapper: Can't add node %s", err) + } + + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + + defer UdevWait(cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) + } + + return nil +} + +// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. +func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. 
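+		// (dmSawExist is set by the log callback in devmapper_log.go when
+		// libdm reports "File exists".)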
+		if dmSawExist {
+			return ErrDeviceIDExists
+		}
+		return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err)
+	}
+
+	return nil
+}
+
+// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceID.
+func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error {
+	devinfo, _ := GetInfo(baseName)
+	doSuspend := devinfo != nil && devinfo.Exists != 0
+
+	if doSuspend {
+		if err := SuspendDevice(baseName); err != nil {
+			return err
+		}
+	}
+
+	if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil {
+		if doSuspend {
+			if err2 := ResumeDevice(baseName); err2 != nil {
+				return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2)
+			}
+		}
+		return err
+	}
+
+	if doSuspend {
+		if err := ResumeDevice(baseName); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
new file mode 100644
index 00000000000..082fb1ba329
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
@@ -0,0 +1,121 @@
+// +build linux,cgo
+
+package devicemapper
+
+import "C"
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+)
+
+// DevmapperLogger defines methods required to register as a callback for
+// logging events received from devicemapper. Note that devicemapper will send
+// *all* logs to callbacks regardless (including debug logs), so it's
+// recommended to not spam the console with the outputs.
+type DevmapperLogger interface {
+	// DMLog is the logging callback containing all of the information from
+	// devicemapper. The interface is identical to the C libdm counterpart.
+	DMLog(level int, file string, line int, dmError int, message string)
+}
+
+// dmLogger is the current logger in use that our messages are forwarded to.
+var dmLogger DevmapperLogger
+
+// LogInit changes the logging callback called after processing libdm logs for
+// error message information. The default logger simply forwards all logs to
+// logrus. Calling LogInit(nil) disables the calling of callbacks.
+func LogInit(logger DevmapperLogger) {
+	dmLogger = logger
+}
+
+// Due to the way cgo works this has to be in a separate file, as devmapper.go has
+// definitions in the cgo block, which is incompatible with using "//export"
+
+// StorageDevmapperLogCallback exports the devmapper log callback for cgo. Note that
+// because we are using callbacks, this function will be called for *every* log
+// in libdm (even debug ones because there's no way of setting the verbosity
+// level for an external logging callback).
+//export StorageDevmapperLogCallback
+func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) {
+	msg := C.GoString(message)
+
+	// Track what errno libdm saw, because the library only gives us 0 or 1.
+	if level < LogLevelDebug {
+		if strings.Contains(msg, "busy") {
+			dmSawBusy = true
+		}
+
+		if strings.Contains(msg, "File exists") {
+			dmSawExist = true
+		}
+
+		if strings.Contains(msg, "No such device or address") {
+			dmSawEnxio = true
+		}
+	}
+
+	if dmLogger != nil {
+		dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg)
+	}
+}
+
+// DefaultLogger is the default logger used by pkg/devicemapper. It forwards
+// all logs that are of higher or equal priority to the given level to the
+// corresponding logrus level.
+type DefaultLogger struct {
+	// Level corresponds to the highest libdm level that will be forwarded to
+	// logrus. In order to change this, register a new DefaultLogger.
+	Level int
+}
+
+// DMLog is the logging callback containing all of the information from
+// devicemapper. The interface is identical to the C libdm counterpart.
+func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) {
+	if level <= l.Level {
+		// Forward the log to the correct logrus level, as allowed by l.Level.
+		logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
+		switch level {
+		case LogLevelFatal, LogLevelErr:
+			logrus.Error(logMsg)
+		case LogLevelWarn:
+			logrus.Warn(logMsg)
+		case LogLevelNotice, LogLevelInfo:
+			logrus.Info(logMsg)
+		case LogLevelDebug:
+			logrus.Debug(logMsg)
+		default:
+			// Don't drop any "unknown" levels.
+			logrus.Info(logMsg)
+		}
+	}
+}
+
+// registerLogCallback registers our own logging callback function for libdm
+// (which is StorageDevmapperLogCallback).
+//
+// Because libdm only gives us {0,1} error codes we need to parse the logs
+// produced by libdm (to set dmSawBusy and so on). Note that by registering a
+// callback using StorageDevmapperLogCallback, libdm will no longer output logs to
+// stderr so we have to log everything ourselves. None of this handling is
+// optional because we depend on log callbacks to parse the logs, and if we
+// don't forward the log information we'll be in a lot of trouble when
+// debugging things.
+func registerLogCallback() {
+	LogWithErrnoInit()
+}
+
+func init() {
+	// Use the default logger by default. We only allow LogLevelFatal by
+	// default, because internally we mask a lot of libdm errors by retrying
+	// and similar tricks. Also, libdm is very chatty and we don't want to
+	// worry users for no reason.
+	dmLogger = DefaultLogger{
+		Level: LogLevelFatal,
+	}
+
+	// Register as early as possible so we don't miss anything.
+	registerLogCallback()
+}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
new file mode 100644
index 00000000000..190d83d4999
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
@@ -0,0 +1,252 @@
+// +build linux,cgo
+
+package devicemapper
+
+/*
+#define _GNU_SOURCE
+#include <libdevmapper.h>
+#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
+
+// FIXME: Can't we find a way to do the logging in pure Go?
+extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
+
+static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
+{
+	char *buffer = NULL;
+	va_list ap;
+	int ret;
+
+	va_start(ap, f);
+	ret = vasprintf(&buffer, f, ap);
+	va_end(ap);
+	if (ret < 0) {
+		// memory allocation failed -- should never happen?
+		return;
+	}
+
+	StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer);
+	free(buffer);
+}
+
+static void log_with_errno_init()
+{
+	dm_log_with_errno_init(log_cb);
+}
+*/
+import "C"
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type (
+	cdmTask C.struct_dm_task
+)
+
+// IOCTL consts
+const (
+	BlkGetSize64 = C.BLKGETSIZE64
+	BlkDiscard   = C.BLKDISCARD
+)
+
+// Devicemapper cookie flags.
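+// These mirror libdevmapper's DM_UDEV_DISABLE_* values; callers OR them into
+// the flags argument of task.setCookie (see CreatePool and
+// RemoveDeviceDeferred in devmapper.go for examples).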
+const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG + DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK +) + +// DeviceMapper mapped functions. +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + DmUdevSetSyncSupport = dmUdevSetSyncSupportFct + DmUdevGetSyncSupport = dmUdevGetSyncSupportFct + DmCookieSupported = dmCookieSupportedFct + LogWithErrnoInit = logWithErrnoInitFct + DmTaskDeferredRemove = dmTaskDeferredRemoveFct + DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *cdmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *cdmTask { + return (*cdmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *cdmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *cdmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *cdmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *cdmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *cdmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetDepsFct(task *cdmTask) *Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + + // golang issue: https://github.com/golang/go/issues/11925 + hdr := reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), + Len: int(Cdeps.count), + Cap: int(Cdeps.count), + } + devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) + + deps := &Deps{ + Count: uint32(Cdeps.count), + Filler: uint32(Cdeps.filler), + } + for _, device := range devices { + deps.Device 
= append(deps.Device, uint64(device))
+	}
+	return deps
+}
+
+func dmTaskGetInfoFct(task *cdmTask, info *Info) int {
+	Cinfo := C.struct_dm_info{}
+	defer func() {
+		info.Exists = int(Cinfo.exists)
+		info.Suspended = int(Cinfo.suspended)
+		info.LiveTable = int(Cinfo.live_table)
+		info.InactiveTable = int(Cinfo.inactive_table)
+		info.OpenCount = int32(Cinfo.open_count)
+		info.EventNr = uint32(Cinfo.event_nr)
+		info.Major = uint32(Cinfo.major)
+		info.Minor = uint32(Cinfo.minor)
+		info.ReadOnly = int(Cinfo.read_only)
+		info.TargetCount = int32(Cinfo.target_count)
+	}()
+	return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
+}
+
+func dmTaskGetDriverVersionFct(task *cdmTask) string {
+	buffer := C.malloc(128)
+	defer C.free(buffer)
+	res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128)
+	if res == 0 {
+		return ""
+	}
+	return C.GoString((*C.char)(buffer))
+}
+
+func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer {
+	var (
+		Cstart, Clength      C.uint64_t
+		CtargetType, Cparams *C.char
+	)
+	defer func() {
+		*start = uint64(Cstart)
+		*length = uint64(Clength)
+		*target = C.GoString(CtargetType)
+		*params = C.GoString(Cparams)
+	}()
+
+	nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams)
+	return nextp
+}
+
+func dmUdevSetSyncSupportFct(syncWithUdev int) {
+	(C.dm_udev_set_sync_support(C.int(syncWithUdev)))
+}
+
+func dmUdevGetSyncSupportFct() int {
+	return int(C.dm_udev_get_sync_support())
+}
+
+func dmUdevWaitFct(cookie uint) int {
+	return int(C.dm_udev_wait(C.uint32_t(cookie)))
+}
+
+func dmCookieSupportedFct() int {
+	return int(C.dm_cookie_supported())
+}
+
+func logWithErrnoInitFct() {
+	C.log_with_errno_init()
+}
+
+func dmSetDevDirFct(dir string) int {
+	Cdir := C.CString(dir)
+	defer free(Cdir)
+
+	return int(C.dm_set_dev_dir(Cdir))
+}
+
+func dmGetLibraryVersionFct(version *string) int {
+	buffer := C.CString(string(make([]byte, 128)))
+	defer free(buffer)
+	defer func() {
+		*version = C.GoString(buffer)
+	}()
+	return int(C.dm_get_library_version(buffer, 128))
+}
diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
new file mode 100644
index 00000000000..7f793c27086
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
@@ -0,0 +1,31 @@
+// +build linux,cgo,!libdm_no_deferred_remove
+
+package devicemapper
+
+// #include <libdevmapper.h>
+import "C"
+
+// LibraryDeferredRemovalSupport tells if the feature is enabled in the build
+const LibraryDeferredRemovalSupport = true
+
+func dmTaskDeferredRemoveFct(task *cdmTask) int {
+	return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task)))
+}
+
+func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int {
+	Cinfo := C.struct_dm_info{}
+	defer func() {
+		info.Exists = int(Cinfo.exists)
+		info.Suspended = int(Cinfo.suspended)
+		info.LiveTable = int(Cinfo.live_table)
+		info.InactiveTable = int(Cinfo.inactive_table)
+		info.OpenCount = int32(Cinfo.open_count)
+		info.EventNr = uint32(Cinfo.event_nr)
+		info.Major = uint32(Cinfo.major)
+		info.Minor = uint32(Cinfo.minor)
+		info.ReadOnly = int(Cinfo.read_only)
+		info.TargetCount = int32(Cinfo.target_count)
+		info.DeferredRemove = int(Cinfo.deferred_remove)
+	}()
+	return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
+}
diff --git
a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go new file mode 100644 index 00000000000..7d84508982d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go @@ -0,0 +1,6 @@ +// +build linux,cgo,!static_build + +package devicemapper + +// #cgo pkg-config: devmapper +import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go new file mode 100644 index 00000000000..a880fec8c49 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -0,0 +1,15 @@ +// +build linux,cgo,libdm_no_deferred_remove + +package devicemapper + +// LibraryDeferredRemovalSupport tells if the feature is enabled in the build +const LibraryDeferredRemovalSupport = false + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + // Error. Nobody should be calling it. + return -1 +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go new file mode 100644 index 00000000000..cf7f26a4c67 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go @@ -0,0 +1,6 @@ +// +build linux,cgo,static_build + +package devicemapper + +// #cgo pkg-config: --static devmapper +import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go new file mode 100644 index 00000000000..50ea7c48238 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go @@ -0,0 +1,28 @@ +// +build linux,cgo + +package devicemapper + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/log.go b/vendor/github.com/containers/storage/pkg/devicemapper/log.go new file mode 100644 index 00000000000..cee5e545498 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/devicemapper/log.go @@ -0,0 +1,11 @@ +package devicemapper + +// definitions from lvm2 lib/log/log.h +const ( + LogLevelFatal = 2 + iota // _LOG_FATAL + LogLevelErr // _LOG_ERR + LogLevelWarn // _LOG_WARN + LogLevelNotice // _LOG_NOTICE + LogLevelInfo // _LOG_INFO + LogLevelDebug // _LOG_DEBUG +) diff --git a/vendor/github.com/containers/storage/pkg/directory/directory.go b/vendor/github.com/containers/storage/pkg/directory/directory.go new file mode 100644 index 00000000000..b0ce706e5ed --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/directory/directory.go @@ -0,0 +1,32 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// DiskUsage is a structure that describes the disk usage (size and inode 
count)
+// of a particular directory.
+type DiskUsage struct {
+	Size       int64
+	InodeCount int64
+}
+
+// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path.
+func MoveToSubdir(oldpath, subdir string) error {
+	infos, err := ioutil.ReadDir(oldpath)
+	if err != nil {
+		return err
+	}
+	for _, info := range infos {
+		if info.Name() != subdir {
+			oldName := filepath.Join(oldpath, info.Name())
+			newName := filepath.Join(oldpath, subdir, info.Name())
+			if err := os.Rename(oldName, newName); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
new file mode 100644
index 00000000000..36e1bdd5fc8
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go
@@ -0,0 +1,62 @@
+//go:build linux || darwin || freebsd || solaris
+// +build linux darwin freebsd solaris
+
+package directory
+
+import (
+	"io/fs"
+	"os"
+	"path/filepath"
+	"syscall"
+)
+
+// Size walks a directory tree and returns its total size in bytes.
+func Size(dir string) (size int64, err error) {
+	usage, err := Usage(dir)
+	if err != nil {
+		return 0, err
+	}
+	return usage.Size, nil
+}
+
+// Usage walks a directory tree and returns its total size in bytes and the number of inodes.
+func Usage(dir string) (usage *DiskUsage, err error) {
+	usage = &DiskUsage{}
+	data := make(map[uint64]struct{})
+	err = filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error {
+		if err != nil {
+			// if dir does not exist, Usage() returns the error.
+			// if dir/x disappeared while walking, Usage() ignores dir/x.
+			if os.IsNotExist(err) && d != dir {
+				return nil
+			}
+			return err
+		}
+
+		fileInfo, err := entry.Info()
+		if err != nil {
+			return err
+		}
+
+		// Check the inode to only count the sizes of files with multiple hard links once.
+		// inode is not a uint64 on all platforms. Cast it to avoid issues.
+		inode := fileInfo.Sys().(*syscall.Stat_t).Ino
+		if _, exists := data[uint64(inode)]; exists {
+			return nil
+		}
+		data[uint64(inode)] = struct{}{}
+
+		// Ignore directory sizes
+		if entry.IsDir() {
+			return nil
+		}
+
+		usage.Size += fileInfo.Size()
+
+		return nil
+	})
+	// inode count is the number of unique inode numbers we saw
+	usage.InodeCount = int64(len(data))
+	return
+}
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
new file mode 100644
index 00000000000..482bc51a26e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go
@@ -0,0 +1,50 @@
+//go:build windows
+// +build windows
+
+package directory
+
+import (
+	"io/fs"
+	"os"
+	"path/filepath"
+)
+
+// Size walks a directory tree and returns its total size in bytes.
+func Size(dir string) (size int64, err error) {
+	usage, err := Usage(dir)
+	if err != nil {
+		return 0, err
+	}
+	return usage.Size, nil
+}
+
+// Usage walks a directory tree and returns its total size in bytes and the number of inodes.
+func Usage(dir string) (usage *DiskUsage, err error) {
+	usage = &DiskUsage{}
+	err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+		if err != nil {
+			// if dir does not exist, Usage() returns the error.
+			// if dir/x disappeared while walking, Usage() ignores dir/x.
+ if os.IsNotExist(err) && path != dir { + return nil + } + return err + } + + usage.InodeCount++ + + // Ignore directory sizes + if d.IsDir() { + return nil + } + + fileInfo, err := d.Info() + if err != nil { + return err + } + usage.Size += fileInfo.Size() + + return nil + }) + return +} diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go new file mode 100644 index 00000000000..7df7f3d4364 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go @@ -0,0 +1,20 @@ +// +build linux + +package dmesg + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +// Dmesg returns last messages from the kernel log, up to size bytes +func Dmesg(size int) []byte { + t := uintptr(3) // SYSLOG_ACTION_READ_ALL + b := make([]byte, size) + amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))) + if err != 0 { + return []byte{} + } + return b[:amt] +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go new file mode 100644 index 00000000000..5be98165ef7 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -0,0 +1,372 @@ +package fileutils + +import ( + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// PatternMatcher allows checking paths against a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// NewPatternMatcher creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = strings.TrimPrefix(filepath.Clean(p[1:]), "/") + pm.exclusions = true + } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err + } + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) + } + return pm, nil +} + +// Deprecated: Please use the `MatchesResult` method instead. +// Matches matches path against all the patterns. 
Matches is not safe to be +// called concurrently +func (pm *PatternMatcher) Matches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + + for _, pattern := range pm.patterns { + negative := false + + if pattern.exclusion { + negative = true + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +type MatchResult struct { + isMatched bool + matches, excludes uint +} + +// Excludes returns true if the overall result is matched +func (m *MatchResult) IsMatched() bool { + return m.isMatched +} + +// Excludes returns the amount of matches of an MatchResult +func (m *MatchResult) Matches() uint { + return m.matches +} + +// Excludes returns the amount of excludes of an MatchResult +func (m *MatchResult) Excludes() uint { + return m.excludes +} + +// MatchesResult verifies the provided filepath against all patterns. +// It returns the `*MatchResult` result for the patterns on success, otherwise +// an error. This method is not safe to be called concurrently. +func (pm *PatternMatcher) MatchesResult(file string) (res *MatchResult, err error) { + file = filepath.FromSlash(file) + res = &MatchResult{false, 0, 0} + + for _, pattern := range pm.patterns { + negative := false + + if pattern.exclusion { + negative = true + } + + match, err := pattern.match(file) + if err != nil { + return nil, err + } + + if match { + res.isMatched = !negative + if negative { + res.excludes++ + } else { + res.matches++ + } + } + } + + if res.matches > 0 { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return res, nil +} + +// IsMatch verifies the provided filepath against all patterns and returns true +// if it matches. A match is valid if the last match is a positive one. +// It returns an error on failure and is not safe to be called concurrently. +func (pm *PatternMatcher) IsMatch(file string) (matched bool, err error) { + res, err := pm.MatchesResult(file) + if err != nil { + return false, err + } + return res.isMatched, nil +} + +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} + +// Patterns returns array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used used to filter file paths. +type Pattern struct { + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + + if p.regexp == nil { + if err := p.compile(); err != nil { + return false, filepath.ErrBadPattern + } + } + + b := p.regexp.MatchString(path) + + return b, nil +} + +func (p *Pattern) compile() error { + regStr := "^" + pattern := p.cleanedPattern + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. 
+ var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + const bs = `\` + if sl == bs { + escSL += bs + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if ch == '.' || ch == '$' { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += bs + string(ch) + } else if ch == '\\' { + // escape next char. + if sl == bs { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += bs + string(scan.Next()) + } else { + return filepath.ErrBadPattern + } + } else { + regStr += string(ch) + } + } + + regStr += "(" + escSL + ".*)?$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + pm, err := NewPatternMatcher(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.IsMatch(file) +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists. +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// ReadSymlinkedPath returns the target directory of a symlink. +// The target of the symbolic link can be a file and a directory. 
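A sketch of the resulting matcher semantics with .dockerignore-style patterns, including a `**` glob and a `!` exclusion (paths are examples; assumes the vendored import path resolves):

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	// Later patterns win: "docs/**/*.md" excludes markdown anywhere under
	// docs/, then "!docs/README.md" re-includes that one file.
	pm, err := fileutils.NewPatternMatcher([]string{
		"docs/**/*.md",
		"!docs/README.md",
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, f := range []string{
		"docs/guide/intro.md", // matched -> excluded
		"docs/README.md",      // re-included by the ! pattern -> false
		"src/main.go",         // never matched
	} {
		matched, err := pm.IsMatch(f)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-22s matched=%v\n", f, matched)
	}
}
```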
+func ReadSymlinkedPath(path string) (realPath string, err error) { + if realPath, err = filepath.Abs(path); err != nil { + return "", errors.Wrapf(err, "unable to get absolute path for %q", path) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", errors.Wrapf(err, "failed to canonicalise path for %q", path) + } + if _, err := os.Stat(realPath); err != nil { + return "", errors.Wrapf(err, "failed to stat target %q of %q", realPath, path) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 00000000000..ccd648fac30 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go new file mode 100644 index 00000000000..0f2cb7ab933 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. +// On Solaris these limits are per process and not systemwide +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go new file mode 100644 index 00000000000..92056c1d5f6 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("%v", err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go new file mode 100644 index 00000000000..5ec21cace52 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. 
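All the platform variants of GetTotalUsedFds share one contract: a best-effort count, with -1 meaning unknown. A standalone sketch of the /proc technique the Linux and FreeBSD variant relies on (not the vendored function itself):

```go
package main

import (
	"fmt"
	"os"
)

// countOpenFDs mirrors the /proc-based approach: every entry under
// /proc/<pid>/fd is one open descriptor. The count is approximate, since
// reading the directory briefly opens a descriptor of its own.
func countOpenFDs() int {
	entries, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid()))
	if err != nil {
		return -1 // the package's "unknown" convention
	}
	return len(entries)
}

func main() {
	fmt.Println("open fds:", countOpenFDs())
}
```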
+func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go new file mode 100644 index 00000000000..e6094b55b71 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go @@ -0,0 +1,88 @@ +// +build linux + +package fsutils + +import ( + "fmt" + "io/ioutil" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + err = dummyFile.Close() + return name, err +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *unix.Dirent) bool { + visited++ + if ent.Type == unix.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*unix.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := unix.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*unix.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index a19ba288b40..7a8fec0ce5f 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -3,15 +3,18 @@ package idtools import ( "bufio" "fmt" + "io/ioutil" "os" "os/user" "sort" "strconv" "strings" + "sync" "syscall" "github.com/containers/storage/pkg/system" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // IDMap contains a single entry for user namespace range remapping. An array @@ -190,7 +193,6 @@ func (i *IDMappings) RootPair() IDPair { } // ToHost returns the host UID and GID for the container uid, gid. 
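SupportsDType plants a dummy file if the directory is empty, then walks raw dirents looking for DT_UNKNOWN. A sketch of the typical call; the path is only an example, and the check matters in practice on XFS volumes formatted with ftype=0:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/fsutils"
)

func main() {
	// Overlay-style storage drivers refuse filesystems where the kernel
	// reports DT_UNKNOWN for directory entries.
	ok, err := fsutils.SupportsDType("/var/lib/containers/storage")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("d_type supported:", ok)
}
```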
-// Remapping is only performed if the ids aren't already the remapped root ids func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { var err error var target IDPair @@ -204,6 +206,67 @@ func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { return target, err } +var ( + overflowUIDOnce sync.Once + overflowGIDOnce sync.Once + overflowUID int + overflowGID int +) + +// getOverflowUID returns the UID mapped to the overflow user +func getOverflowUID() int { + overflowUIDOnce.Do(func() { + // 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present + overflowUID = 65534 + if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowuid"); err == nil { + if tmp, err := strconv.Atoi(string(content)); err == nil { + overflowUID = tmp + } + } + }) + return overflowUID +} + +// getOverflowUID returns the GID mapped to the overflow user +func getOverflowGID() int { + overflowGIDOnce.Do(func() { + // 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present + overflowGID = 65534 + if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowgid"); err == nil { + if tmp, err := strconv.Atoi(string(content)); err == nil { + overflowGID = tmp + } + } + }) + return overflowGID +} + +// ToHost returns the host UID and GID for the container uid, gid. +// Remapping is only performed if the ids aren't already the remapped root ids +// If the mapping is not possible because the target ID is not mapped into +// the namespace, then the overflow ID is used. +func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = RawToHost(pair.UID, i.uids) + if err != nil { + target.UID = getOverflowUID() + logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID) + } + } + + if pair.GID != target.GID { + target.GID, err = RawToHost(pair.GID, i.gids) + if err != nil { + target.GID = getOverflowGID() + logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID) + } + } + return target, nil +} + // ToContainer returns the container UID and GID for the host uid and gid func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { uid, err := RawToContainer(pair.UID, i.uids) diff --git a/vendor/github.com/containers/storage/pkg/locker/README.md b/vendor/github.com/containers/storage/pkg/locker/README.md new file mode 100644 index 00000000000..ad15e89af10 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however, the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. 
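ToHostOverflow degrades to the kernel overflow IDs instead of failing when a target ID falls outside the configured mapping. A sketch, under the assumption that the package's NewIDMappingsFromMaps constructor (not shown in this hunk) builds the IDMappings:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// A single-range mapping: container IDs 0..999 -> host IDs 100000..100999.
	uidMaps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 1000}}
	gidMaps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 1000}}
	m := idtools.NewIDMappingsFromMaps(uidMaps, gidMaps)

	// 65534 is outside the range, so ToHostOverflow falls back to the
	// kernel overflow IDs (typically 65534) rather than returning an error
	// the way ToHost would.
	pair, err := m.ToHostOverflow(idtools.IDPair{UID: 65534, GID: 65534})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("host uid=%d gid=%d\n", pair.UID, pair.GID)
}
```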
+ + +## Usage + +```go +package important + +import ( + "sync" + "time" + + "github.com/containers/storage/pkg/locker" +) + +type important struct { + locks *locker.Locker + data map[string]interface{} + mu sync.Mutex +} + +func (i *important) Get(name string) interface{} { + i.locks.Lock(name) + defer i.locks.Unlock(name) + return data[name] +} + +func (i *important) Create(name string, data interface{}) { + i.locks.Lock(name) + defer i.locks.Unlock(name) + + i.createImportant(data) + + s.mu.Lock() + i.data[name] = data + s.mu.Unlock() +} + +func (i *important) createImportant(data interface{}) { + time.Sleep(10 * time.Second) +} +``` + +For functions dealing with a given name, always lock at the beginning of the +function (or before doing anything with the underlying state), this ensures any +other function that is dealing with the same name will block. + +When needing to modify the underlying data, use the global lock to ensure nothing +else is modifying it at the same time. +Since name lock is already in place, no reads will occur while the modification +is being performed. + diff --git a/vendor/github.com/containers/storage/pkg/locker/locker.go b/vendor/github.com/containers/storage/pkg/locker/locker.go new file mode 100644 index 00000000000..0b22ddfab85 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/locker/locker.go @@ -0,0 +1,112 @@ +/* +Package locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. +*/ +package locker + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name. +type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. 
If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go new file mode 100644 index 00000000000..6f072650537 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -0,0 +1,157 @@ +// +build linux,cgo + +package loopback + +import ( + "errors" + "fmt" + "os" + "syscall" + + "github.com/sirupsen/logrus" +) + +// Loopback related errors +var ( + ErrAttachLoopbackDevice = errors.New("loopback attach failed") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrSetCapacity = errors.New("Unable set loopback capacity") +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) { + // Read information about the loopback file. 
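A compact usage sketch for the name-keyed locker: contenders on the same name serialize, and the entry is garbage-collected once no waiters remain.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/containers/storage/pkg/locker"
)

func main() {
	l := locker.New()
	var wg sync.WaitGroup

	// Four goroutines contend on one name; each holds the lock exclusively.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			l.Lock("layer-abc")
			defer l.Unlock("layer-abc")
			fmt.Println("holding layer-abc in goroutine", n)
		}(i)
	}
	wg.Wait()

	// Once every holder has unlocked, the entry is deleted, so a stray
	// Unlock reports ErrNoSuchLock.
	if err := l.Unlock("layer-abc"); err == locker.ErrNoSuchLock {
		fmt.Println("lock entry already cleaned up")
	}
}
```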
+ var st syscall.Stat_t + err = syscall.Fstat(int(sparseFile.Fd()), &st) + if err != nil { + logrus.Errorf("Reading information about loopback file %s: %v", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + logrus.Error("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + logrus.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + logrus.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + + // Check if the loopback driver and underlying filesystem agree on the loopback file's + // device and inode numbers. + dev, ino, err := getLoopbackBackingFile(loopFile) + if err != nil { + logrus.Errorf("Getting loopback backing file: %s", err) + return nil, ErrGetLoopbackBackingFile + } + if dev != uint64(st.Dev) || ino != st.Ino { + logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino) + } + + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// AttachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard error and start looping for a + // loopback from index 0. 
+ startIndex, err := getNextFreeLoopbackIndex() + if err != nil { + logrus.Debugf("Error retrieving the next available loopback: %s", err) + } + + // OpenFile adds O_CLOEXEC + sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Opening sparse file: %v", err) + return nil, ErrAttachLoopbackDevice + } + defer sparseFile.Close() + + loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile) + if err != nil { + return nil, err + } + + // Set the status of the loopback device + loopInfo := &loopInfo64{ + loFileName: stringToLoopName(loopFile.Name()), + loOffset: 0, + loFlags: LoFlagsAutoClear, + } + + if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { + logrus.Errorf("Cannot set up loopback device info: %s", err) + + // If the call failed, then free the loopback device + if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { + logrus.Error("While cleaning up the loopback device") + } + loopFile.Close() + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go new file mode 100644 index 00000000000..ea6841958dd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go @@ -0,0 +1,53 @@ +// +build linux,cgo + +package loopback + +import ( + "syscall" + "unsafe" +) + +func ioctlLoopCtlGetFree(fd uintptr) (int, error) { + index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) + if err != 0 { + return 0, err + } + return int(index), nil } + +func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { + return err + } + return nil } + +func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return err + } + return nil } + +func ioctlLoopClrFd(loopFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { + return err + } + return nil } + +func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { + loopInfo := &loopInfo64{} + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return nil, err + } + return loopInfo, nil } + +func ioctlLoopSetCapacity(loopFd uintptr, value int) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + return err + } + return nil } diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go new file mode 100644 index 00000000000..a50de7f07a2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go @@ -0,0 +1,52 @@ +// +build linux,cgo + +package loopback + +/* +#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
+ +#ifndef LOOP_CTL_GET_FREE + #define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#ifndef LO_FLAGS_PARTSCAN + #define LO_FLAGS_PARTSCAN 8 +#endif + +*/ +import "C" + +type loopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncryptType uint32 + loEncryptKeySize uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 +} + +// IOCTL consts +const ( + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +// LOOP consts. +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go new file mode 100644 index 00000000000..c9be05776d1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/loopback.go @@ -0,0 +1,63 @@ +// +build linux,cgo + +package loopback + +import ( + "fmt" + "os" + "syscall" + + "github.com/sirupsen/logrus" +) + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + logrus.Errorf("Get loopback backing file: %v", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +// SetCapacity reloads the size for the loopback device. +func SetCapacity(file *os.File) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + logrus.Errorf("loopbackSetCapacity: %s", err) + return ErrSetCapacity + } + return nil +} + +// FindLoopDeviceFor returns a loopback device file for the specified file which +// is backing file of a loop back device. 
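AttachLoopDevice scans /dev/loopN until LOOP_SET_FD succeeds and marks the device autoclear, so it detaches when the last open descriptor closes. A root-only sketch (Linux with cgo; file names and sizes are examples):

```go
//go:build linux && cgo

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/containers/storage/pkg/loopback"
)

func main() {
	// Needs root and a kernel exposing /dev/loop-control.
	backing, err := os.CreateTemp("", "loop-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(backing.Name())
	if err := backing.Truncate(16 << 20); err != nil { // 16 MiB sparse file
		log.Fatal(err)
	}
	backing.Close()

	loop, err := loopback.AttachLoopDevice(backing.Name())
	if err != nil {
		log.Fatal(err)
	}
	defer loop.Close() // LO_FLAGS_AUTOCLEAR detaches on final close
	fmt.Println("attached:", loop.Name())

	// After growing the backing file, SetCapacity tells the loop driver
	// to re-read its size.
	if err := os.Truncate(backing.Name(), 32<<20); err != nil {
		log.Fatal(err)
	}
	if err := loopback.SetCapacity(loop); err != nil {
		log.Fatal(err)
	}
}
```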
+func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == uint64(targetDevice) && inode == targetInode { + return file + } + file.Close() + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback_unsupported.go b/vendor/github.com/containers/storage/pkg/loopback/loopback_unsupported.go new file mode 100644 index 00000000000..460b3070992 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/loopback/loopback_unsupported.go @@ -0,0 +1 @@ +package loopback diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go new file mode 100644 index 00000000000..3ba99cf9351 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go @@ -0,0 +1,48 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MNT_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = unix.MNT_SYNCHRONOUS + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MNT_UPDATE + + // NOATIME will not update the file access time when reading from a file. 
+ NOATIME = unix.MNT_NOATIME + + mntDetach = unix.MNT_FORCE + + NODIRATIME = 0 + NODEV = 0 + DIRSYNC = 0 + MANDLOCK = 0 + BIND = 0 + RBIND = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SLAVE = 0 + RSLAVE = 0 + SHARED = 0 + RSHARED = 0 + RELATIME = 0 + STRICTATIME = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go index 9afd26d4c06..ee0f593a50a 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -1,4 +1,5 @@ -// +build !linux +//go:build !linux && !freebsd +// +build !linux,!freebsd package mount diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go index b31cf99d0ff..72ceec3ddad 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go @@ -28,14 +28,25 @@ func allocateIOVecs(options []string) []C.struct_iovec { func mount(device, target, mType string, flag uintptr, data string) error { isNullFS := false - xs := strings.Split(data, ",") - for _, x := range xs { - if x == "bind" { - isNullFS = true + options := []string{"fspath", target} + + if data != "" { + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + continue + } + opt := strings.SplitN(x, "=", 2) + options = append(options, opt[0]) + if len(opt) == 2 { + options = append(options, opt[1]) + } else { + options = append(options, "") + } } } - options := []string{"fspath", target} if isNullFS { options = append(options, "fstype", "nullfs", "target", device) } else { diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go new file mode 100644 index 00000000000..7738fc7411e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go @@ -0,0 +1,74 @@ +// +build !windows + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 it a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +// ParseRelease parses a string and creates a VersionInfo based on it. +func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. 
Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go new file mode 100644 index 00000000000..71f205b2852 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go @@ -0,0 +1,56 @@ +// +build darwin + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/mattn/go-shellwords" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + release, err := getRelease() + if err != nil { + return nil, err + } + + return ParseRelease(release) +} + +// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version +func getRelease() (string, error) { + cmd := exec.Command("system_profiler", "SPSoftwareDataType") + osName, err := cmd.Output() + if err != nil { + return "", err + } + + var release string + data := strings.Split(string(osName), "\n") + for _, line := range data { + if strings.Contains(line, "Kernel Version") { + // It has the format like ' Kernel Version: Darwin 14.5.0' + content := strings.SplitN(line, ":", 2) + if len(content) != 2 { + return "", fmt.Errorf("Kernel Version is invalid") + } + + prettyNames, err := shellwords.Parse(content[1]) + if err != nil { + return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) + } + + if len(prettyNames) != 2 { + return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") + } + release = prettyNames[1] + } + } + + return release, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go new file mode 100644 index 00000000000..7a68bc39bf4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go @@ -0,0 +1,45 @@ +// +build linux freebsd solaris openbsd + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" + + "github.com/sirupsen/logrus" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +// CheckKernelVersion checks if current kernel is newer than (or equal to) +// the given version. 
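A sketch of parsing and comparing kernel releases with this package; the release string is only an example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/parsers/kernel"
)

func main() {
	v, err := kernel.ParseRelease("5.14.0-284.11.1.el9_2.x86_64")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("kernel=%d major=%d minor=%d flavor=%q\n",
		v.Kernel, v.Major, v.Minor, v.Flavor)

	// CompareKernelVersion returns -1, 0, or 1, so "at least 4.18"
	// is a >= 0 comparison against the minimum.
	minimum := kernel.VersionInfo{Kernel: 4, Major: 18}
	fmt.Println("at least 4.18:", kernel.CompareKernelVersion(*v, minimum) >= 0)
}
```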
+func CheckKernelVersion(k, major, minor int) bool { + if v, err := GetKernelVersion(); err != nil { + logrus.Warnf("Error getting kernel version: %s", err) + } else { + if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 00000000000..3d382923686 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,70 @@ +// +build windows + +package kernel + +import ( + "fmt" + "unsafe" + + "golang.org/x/sys/windows" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + + var ( + h windows.Handle + dwVersion uint32 + err error + ) + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, + windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + windows.KEY_READ, + &h); err != nil { + return KVI, err + } + defer windows.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err = windows.RegQueryValueEx(h, + windows.StringToUTF16Ptr("BuildLabEx"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return KVI, err + } + + KVI.kvi = windows.UTF16ToString(buf[:]) + + // Important - docker.exe MUST be manifested for this API to return + // the correct information. + if dwVersion, err = windows.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0xFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 00000000000..e913fad0013 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,17 @@ +package kernel + +import "golang.org/x/sys/unix" + +// Utsname represents the system name structure. +// It is passthrough for unix.Utsname in order to make it portable with +// other platforms where it is not available. 
+type Utsname unix.Utsname + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 00000000000..49370bd3dd9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 00000000000..1da3f239fac --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux,!solaris + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/containers/storage/pkg/parsers/parsers.go new file mode 100644 index 00000000000..acc897168f3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/parsers/parsers.go @@ -0,0 +1,69 @@ +// Package parsers provides helper functions to parse and validate different type +// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel +// operating system versions. +package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. 
+// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/pools/pools.go b/vendor/github.com/containers/storage/pkg/pools/pools.go new file mode 100644 index 00000000000..a15e3688b9e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/pools/pools.go @@ -0,0 +1,119 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/containers/storage/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool *BufioReaderPool + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool *sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := &sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. 
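A sketch of ParseUintList on a cpuset-style string; missing keys in the returned map simply read as false:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/parsers"
)

func main() {
	// The same format cgroups use in cpuset.cpus: single values, ranges,
	// and duplicates are all accepted.
	cpus, err := parsers.ParseUintList("0,3-4,7")
	if err != nil {
		log.Fatal(err)
	}
	for i := 0; i < 8; i++ {
		fmt.Printf("cpu %d enabled: %v\n", i, cpus[i])
	}
}
```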
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool *sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + pool := &sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + } + return &BufioWriterPool{pool: pool} +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.Writecloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/vendor/github.com/containers/storage/pkg/promise/promise.go b/vendor/github.com/containers/storage/pkg/promise/promise.go new file mode 100644 index 00000000000..dd52b9082f7 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/promise/promise.go @@ -0,0 +1,11 @@ +package promise + +// Go is a basic promise implementation: it wraps calls a function in a goroutine, +// and returns a channel which will later return the function's return value. +func Go(f func() error) chan error { + ch := make(chan error, 1) + go func() { + ch <- f() + }() + return ch +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go new file mode 100644 index 00000000000..6f63ae99170 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go @@ -0,0 +1,37 @@ +// +build freebsd + +package reexec + +import ( + "context" + "os" + "os/exec" + + "golang.org/x/sys/unix" +) + +// Self returns the path to the current process's binary. +// Uses sysctl. +func Self() string { + path, err := unix.SysctlArgs("kern.proc.pathname", -1) + if err == nil { + return path + } + return os.Args[0] +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + cmd := exec.Command(Self()) + cmd.Args = args + return cmd +} + +// CommandContext returns *exec.Cmd which has Path as current binary. 
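pools.Copy checks out a pooled 32K bufio.Reader for the duration of a single copy, avoiding a fresh buffer allocation per call. A small sketch:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/containers/storage/pkg/pools"
)

func main() {
	// 64 KiB of input flows through one borrowed 32K read buffer, which
	// goes back into the shared pool when the copy returns.
	src := strings.NewReader(strings.Repeat("x", 64*1024))
	n, err := pools.Copy(io.Discard, src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("copied %d bytes\n", n)
}
```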
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + return cmd +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go index 9dd8cb9bbee..a56ada2161e 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go @@ -1,4 +1,5 @@ -// +build freebsd solaris darwin +//go:build solaris || darwin +// +build solaris darwin package reexec diff --git a/vendor/github.com/containers/storage/pkg/stringutils/README.md b/vendor/github.com/containers/storage/pkg/stringutils/README.md new file mode 100644 index 00000000000..b3e454573c3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringutils/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with strings diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go new file mode 100644 index 00000000000..66a59c85d56 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go @@ -0,0 +1,110 @@ +// Package stringutils provides helper functions for dealing with strings. +package stringutils + +import ( + "bytes" + "math/rand" + "strings" +) + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. +func GenerateRandomAlphaOnlyString(n int) string { + // make a really long string + letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]byte, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} + +// GenerateRandomASCIIString generates an ASCII random string with length n. +func GenerateRandomASCIIString(n int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + res := make([]byte, n) + for i := 0; i < n; i++ { + res[i] = chars[rand.Intn(len(chars))] + } + return string(res) +} + +// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...). +// For maxlen of 3 and lower, no ellipsis is appended. +func Ellipsis(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + if maxlen <= 3 { + return string(r[:maxlen]) + } + return string(r[:maxlen-3]) + "..." +} + +// Truncate truncates a string to maxlen. +func Truncate(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + return string(r[:maxlen]) +} + +// InSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case insensitive +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.EqualFold(s, ss) { + return true + } + } + return false +} + +// RemoveFromSlice removes a string from a slice. The string can be present +// multiple times. The entire slice is iterated. +func RemoveFromSlice(slice []string, s string) (ret []string) { + for _, ss := range slice { + if !strings.EqualFold(s, ss) { + ret = append(ret, ss) + } + } + return ret +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! 
\t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and an open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// ShellQuoteArguments takes a list of strings and escapes them so they will be +// handled right when passed as arguments to a program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go index 10355848bdb..6b47c4e717f 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go @@ -13,6 +13,9 @@ const ( // Operation not supported EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP + + // Value is too small or too large for maximum size allowed + EOVERFLOW unix.Errno = unix.EOVERFLOW ) // Lgetxattr retrieves the value of the extended attribute identified by attr diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go index bc8b8e3a5fe..3fc27f0b139 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -10,6 +10,9 @@ const ( // Operation not supported EOPNOTSUPP syscall.Errno = syscall.Errno(0) + + // Value is too small or too large for maximum size allowed + EOVERFLOW syscall.Errno = syscall.Errno(0) ) // Lgetxattr is not supported on platforms other than linux. diff --git a/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go new file mode 100644 index 00000000000..674e0a0baed --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/tarlog/tarlogger.go @@ -0,0 +1,66 @@ +package tarlog + +import ( + "io" + "sync" + + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/archive/tar" +) + +type tarLogger struct { + writer *io.PipeWriter + closeMutex *sync.Mutex + closed bool +} + +// NewLogger returns a writer that, when a tar archive is written to it, calls +// `logger` for each file header it encounters in the archive. +func NewLogger(logger func(*tar.Header)) (io.WriteCloser, error) { + reader, writer := io.Pipe() + t := &tarLogger{ + writer: writer, + closeMutex: new(sync.Mutex), + closed: false, + } + tr := tar.NewReader(reader) + t.closeMutex.Lock() + go func() { + hdr, err := tr.Next() + for err == nil { + logger(hdr) + hdr, err = tr.Next() + + } + // Make sure to avoid writes after the reader has been closed. + if err := reader.Close(); err != nil { + logrus.Errorf("Closing tarlogger reader: %v", err) + } + // Unblock the Close(). + t.closeMutex.Unlock() + }() + return t, nil +} + +func (t *tarLogger) Write(b []byte) (int, error) { + if t.closed { + // We cannot use os.Pipe() as this alters the tar's digest. Using + // io.Pipe() requires this workaround as it does not allow for writes + // after close. + return len(b), nil + } + n, err := t.writer.Write(b) + if err == io.ErrClosedPipe { + // The pipe got closed. Track it and avoid to call Write in future. 
+ t.closed = true + return len(b), nil + } + return n, err +} + +func (t *tarLogger) Close() error { + err := t.writer.Close() + // Wait for the reader to finish. + t.closeMutex.Lock() + return err +} diff --git a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go new file mode 100644 index 00000000000..74776e65e6f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go @@ -0,0 +1,139 @@ +// Package truncindex provides a general 'index tree', used by Docker +// in order to be able to reference containers by only a few unambiguous +// characters of their id. +package truncindex + +import ( + "errors" + "fmt" + "strings" + "sync" + + "github.com/tchap/go-patricia/patricia" +) + +var ( + // ErrEmptyPrefix is an error returned if the prefix was empty. + ErrEmptyPrefix = errors.New("Prefix can't be empty") + + // ErrIllegalChar is returned when a space is in the ID + ErrIllegalChar = errors.New("illegal character: ' '") + + // ErrNotExist is returned when ID or its prefix not found in index. + ErrNotExist = errors.New("ID does not exist") +) + +// ErrAmbiguousPrefix is returned if the prefix was ambiguous +// (multiple ids for the prefix). +type ErrAmbiguousPrefix struct { + prefix string +} + +func (e ErrAmbiguousPrefix) Error() string { + return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) +} + +// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. +// This is used to retrieve image and container IDs by more convenient shorthand prefixes. +type TruncIndex struct { + sync.RWMutex + trie *patricia.Trie + ids map[string]struct{} +} + +// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. +func NewTruncIndex(ids []string) (idx *TruncIndex) { + idx = &TruncIndex{ + ids: make(map[string]struct{}), + + // Change patricia max prefix per node length, + // because our len(ID) always 64 + trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), + } + for _, id := range ids { + idx.addID(id) + } + return +} + +func (idx *TruncIndex) addID(id string) error { + if strings.Contains(id, " ") { + return ErrIllegalChar + } + if id == "" { + return ErrEmptyPrefix + } + if _, exists := idx.ids[id]; exists { + return fmt.Errorf("id already exists: '%s'", id) + } + idx.ids[id] = struct{}{} + if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { + return fmt.Errorf("failed to insert id: %s", id) + } + return nil +} + +// Add adds a new ID to the TruncIndex. +func (idx *TruncIndex) Add(id string) error { + idx.Lock() + defer idx.Unlock() + return idx.addID(id) +} + +// Delete removes an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. +func (idx *TruncIndex) Delete(id string) error { + idx.Lock() + defer idx.Unlock() + if _, exists := idx.ids[id]; !exists || id == "" { + return fmt.Errorf("no such id: '%s'", id) + } + delete(idx.ids, id) + if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { + return fmt.Errorf("no such id: '%s'", id) + } + return nil +} + +// Get retrieves an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. 
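The tarlog writer above feeds a background tar reader through an in-memory pipe, invoking the callback once per header. A minimal sketch of its use, assuming the same tar-split fork of archive/tar that the vendored package imports:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/tarlog"
	"github.com/vbatts/tar-split/archive/tar"
)

func main() {
	// The callback runs on a background goroutine, once per header read
	// from the stream written into w.
	w, err := tarlog.NewLogger(func(h *tar.Header) {
		fmt.Println("saw entry:", h.Name)
	})
	if err != nil {
		panic(err)
	}
	tw := tar.NewWriter(w)
	// One empty file entry, so the reader side has a header to report.
	if err := tw.WriteHeader(&tar.Header{Name: "etc/hostname", Mode: 0644}); err != nil {
		panic(err)
	}
	tw.Close() // writes the tar footer, letting the background reader hit EOF
	w.Close()  // blocks until the reader goroutine has drained the pipe
}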
+func (idx *TruncIndex) Get(s string) (string, error) { + if s == "" { + return "", ErrEmptyPrefix + } + var ( + id string + ) + subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { + if id != "" { + // we haven't found the ID if there are two or more IDs + id = "" + return ErrAmbiguousPrefix{prefix: string(prefix)} + } + id = string(prefix) + return nil + } + + idx.RLock() + defer idx.RUnlock() + if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { + return "", err + } + if id != "" { + return id, nil + } + return "", ErrNotExist +} + +// Iterate iterates over all stored IDs and passes each of them to the given +// handler. Take care that the handler method does not call any public +// method on truncindex as the internal locking is not reentrant/recursive +// and will result in deadlock. +func (idx *TruncIndex) Iterate(handler func(id string)) { + idx.Lock() + defer idx.Unlock() + idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { + handler(string(prefix)) + return nil + }) +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index c352efce0aa..baeb8f1aab5 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package unshare @@ -76,6 +77,28 @@ func getRootlessGID() int { return os.Getegid() } +// IsSetID checks if specified path has correct FileMode (Setuid|SETGID) or the +// matching file capabilitiy +func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) { + info, err := os.Stat(path) + if err != nil { + return false, err + } + + mode := info.Mode() + if mode&modeid == modeid { + return true, nil + } + cap, err := capability.NewFile2(path) + if err != nil { + return false, err + } + if err := cap.Load(); err != nil { + return false, err + } + return cap.Get(capability.EFFECTIVE, capid), nil +} + func (c *Cmd) Start() error { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -215,15 +238,26 @@ func (c *Cmd) Start() error { gidmapSet := false // Set the GID map. if c.UseNewgidmap { - cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) + path, err := exec.LookPath("newgidmap") + if err != nil { + return errors.Wrapf(err, "error finding newgidmap") + } + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) g.Reset() cmd.Stdout = g cmd.Stderr = g - err := cmd.Run() - if err == nil { + if err := cmd.Run(); err == nil { gidmapSet = true } else { logrus.Warnf("Error running newgidmap: %v: %s", err, g.String()) + isSetgid, err := IsSetID(path, os.ModeSetgid, capability.CAP_SETGID) + if err != nil { + logrus.Warnf("Failed to check for setgid on %s: %v", path, err) + } else { + if !isSetgid { + logrus.Warnf("%s should be setgid or have filecaps setgid", path) + } + } logrus.Warnf("Falling back to single mapping") g.Reset() g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid()))) @@ -262,17 +296,29 @@ func (c *Cmd) Start() error { fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) } uidmapSet := false - // Set the GID map. + // Set the UID map. if c.UseNewuidmap { - cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) 
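Returning to the truncindex package above: Get resolves any unambiguous prefix to the full ID and fails on ambiguity. A short illustrative sketch (the IDs are made up):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/truncindex"
)

func main() {
	idx := truncindex.NewTruncIndex([]string{"abcdef123456", "abcd98765432"})

	id, err := idx.Get("abcde") // prefix unique to the first ID
	fmt.Println(id, err)        // abcdef123456 <nil>

	_, err = idx.Get("abcd") // shared by both IDs -> ErrAmbiguousPrefix
	fmt.Println(err)

	_, err = idx.Get("ffff") // no match -> ErrNotExist
	fmt.Println(err)
}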
+ path, err := exec.LookPath("newuidmap") + if err != nil { + return errors.Wrapf(err, "error finding newuidmap") + } + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) u.Reset() cmd.Stdout = u cmd.Stderr = u - err := cmd.Run() - if err == nil { + if err := cmd.Run(); err == nil { uidmapSet = true } else { logrus.Warnf("Error running newuidmap: %v: %s", err, u.String()) + isSetuid, err := IsSetID(path, os.ModeSetuid, capability.CAP_SETUID) + if err != nil { + logrus.Warnf("Failed to check for setuid on %s: %v", path, err) + } else { + if !isSetuid { + logrus.Warnf("%s should be setuid or have filecaps setuid", path) + } + } + logrus.Warnf("Falling back to single mapping") u.Reset() u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid()))) diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf new file mode 100644 index 00000000000..c17dd6d37ea --- /dev/null +++ b/vendor/github.com/containers/storage/storage.conf @@ -0,0 +1,210 @@ +# This file is is the configuration file for all tools +# that use the containers/storage library. The storage.conf file +# overrides all other storage.conf files. Container engines using the +# container/storage library do not inherit fields from other storage.conf +# files. +# +# Note: The storage.conf file overrides other storage.conf files based on this precedence: +# /usr/containers/storage.conf +# /etc/containers/storage.conf +# $HOME/.config/containers/storage.conf +# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set) +# See man 5 containers-storage.conf for more information +# The "container storage" table contains all of the server options. +[storage] + +# Default Storage Driver, Must be set for proper operation. +driver = "overlay" + +# Temporary storage location +runroot = "/run/containers/storage" + +# Primary Read/Write location of container storage +# When changing the graphroot location on an SELINUX system, you must +# ensure the labeling matches the default locations labels with the +# following commands: +# semanage fcontext -a -e /var/lib/containers/storage /NEWSTORAGEPATH +# restorecon -R -v /NEWSTORAGEPATH +graphroot = "/var/lib/containers/storage" + + +# Storage path for rootless users +# +# rootless_storage_path = "$HOME/.local/share/containers/storage" + +[storage.options] +# Storage options to be passed to underlying storage drivers + +# AdditionalImageStores is used to pass paths to additional Read/Only image stores +# Must be comma separated list. +additionalimagestores = [ +] + +# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of +# a container, to the UIDs/GIDs as they should appear outside of the container, +# and the length of the range of UIDs/GIDs. Additional mapped sets can be +# listed and will be heeded by libraries, but there are limits to the number of +# mappings which the kernel will allow when you later attempt to run a +# container. +# +# remap-uids = 0:1668442479:65536 +# remap-gids = 0:1668442479:65536 + +# Remap-User/Group is a user name which can be used to look up one or more UID/GID +# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting +# with an in-container ID of 0 and then a host-level ID taken from the lowest +# range that matches the specified name, and using the length of that range. 
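The new IsSetID helper above backs the diagnostics emitted when newuidmap/newgidmap fail. A Linux-only sketch of calling it directly; the binary path is illustrative, and the capability import matches the one the vendored file itself uses:

//go:build linux

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/unshare"
	"github.com/syndtr/gocapability/capability"
)

func main() {
	// Mirrors the diagnostic above: newuidmap must be setuid root or carry
	// the CAP_SETUID file capability for multi-range mappings to work.
	// The hard-coded path is for illustration; real code would use exec.LookPath.
	ok, err := unshare.IsSetID("/usr/bin/newuidmap", os.ModeSetuid, capability.CAP_SETUID)
	if err != nil {
		fmt.Println("check failed:", err)
		return
	}
	fmt.Println("newuidmap is setuid (or has filecaps setuid):", ok)
}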
+# Additional ranges are then assigned, using the ranges which specify the +# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID, +# until all of the entries have been used for maps. +# +# remap-user = "containers" +# remap-group = "containers" + +# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID +# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned +# to containers configured to create automatically a user namespace. Containers +# configured to automatically create a user namespace can still overlap with containers +# having an explicit mapping set. +# This setting is ignored when running as rootless. +# root-auto-userns-user = "storage" +# +# Auto-userns-min-size is the minimum size for a user namespace created automatically. +# auto-userns-min-size=1024 +# +# Auto-userns-max-size is the minimum size for a user namespace created automatically. +# auto-userns-max-size=65536 + +[storage.options.overlay] +# ignore_chown_errors can be set to allow a non privileged user running with +# a single UID within a user namespace to run containers. The user can pull +# and use any image even those with multiple uids. Note multiple UIDs will be +# squashed down to the default uid in the container. These images will have no +# separation between the users in the container. Only supported for the overlay +# and vfs drivers. +#ignore_chown_errors = "false" + +# Inodes is used to set a maximum inodes of the container image. +# inodes = "" + +# Path to an helper program to use for mounting the file system instead of mounting it +# directly. +#mount_program = "/usr/bin/fuse-overlayfs" + +# mountopt specifies comma separated list of extra mount options +mountopt = "nodev" + +# Set to skip a PRIVATE bind mount on the storage home directory. +# skip_mount_home = "false" + +# Size is used to set a maximum size of the container image. +# size = "" + +# ForceMask specifies the permissions mask that is used for new files and +# directories. +# +# The values "shared" and "private" are accepted. +# Octal permission masks are also accepted. +# +# "": No value specified. +# All files/directories, get set with the permissions identified within the +# image. +# "private": it is equivalent to 0700. +# All files/directories get set with 0700 permissions. The owner has rwx +# access to the files. No other users on the system can access the files. +# This setting could be used with networked based homedirs. +# "shared": it is equivalent to 0755. +# The owner has rwx access to the files and everyone else can read, access +# and execute them. This setting is useful for sharing containers storage +# with other users. For instance have a storage owned by root but shared +# to rootless users as an additional store. +# NOTE: All files within the image are made readable and executable by any +# user on the system. Even /etc/shadow within your image is now readable by +# any user. +# +# OCTAL: Users can experiment with other OCTAL Permissions. +# +# Note: The force_mask Flag is an experimental feature, it could change in the +# future. When "force_mask" is set the original permission mask is stored in +# the "user.containers.override_stat" xattr and the "mount_program" option must +# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the +# extended attribute permissions to processes within containers rather then the +# "force_mask" permissions. 
+# +# force_mask = "" + +[storage.options.thinpool] +# Storage Options for thinpool + +# autoextend_percent determines the amount by which pool needs to be +# grown. This is specified in terms of % of pool size. So a value of 20 means +# that when threshold is hit, pool will be grown by 20% of existing +# pool size. +# autoextend_percent = "20" + +# autoextend_threshold determines the pool extension threshold in terms +# of percentage of pool size. For example, if threshold is 60, that means when +# pool is 60% full, threshold has been hit. +# autoextend_threshold = "80" + +# basesize specifies the size to use when creating the base device, which +# limits the size of images and containers. +# basesize = "10G" + +# blocksize specifies a custom blocksize to use for the thin pool. +# blocksize="64k" + +# directlvm_device specifies a custom block storage device to use for the +# thin pool. Required if you setup devicemapper. +# directlvm_device = "" + +# directlvm_device_force wipes device even if device already has a filesystem. +# directlvm_device_force = "True" + +# fs specifies the filesystem type to use for the base device. +# fs="xfs" + +# log_level sets the log level of devicemapper. +# 0: LogLevelSuppress 0 (Default) +# 2: LogLevelFatal +# 3: LogLevelErr +# 4: LogLevelWarn +# 5: LogLevelNotice +# 6: LogLevelInfo +# 7: LogLevelDebug +# log_level = "7" + +# min_free_space specifies the min free space percent in a thin pool require for +# new device creation to succeed. Valid values are from 0% - 99%. +# Value 0% disables +# min_free_space = "10%" + +# mkfsarg specifies extra mkfs arguments to be used when creating the base +# device. +# mkfsarg = "" + +# metadata_size is used to set the `pvcreate --metadatasize` options when +# creating thin devices. Default is 128k +# metadata_size = "" + +# Size is used to set a maximum size of the container image. +# size = "" + +# use_deferred_removal marks devicemapper block device for deferred removal. +# If the thinpool is in use when the driver attempts to remove it, the driver +# tells the kernel to remove it as soon as possible. Note this does not free +# up the disk space, use deferred deletion to fully remove the thinpool. +# use_deferred_removal = "True" + +# use_deferred_deletion marks thinpool device for deferred deletion. +# If the device is busy when the driver attempts to delete it, the driver +# will attempt to delete device every 30 seconds until successful. +# If the program using the driver exits, the driver will continue attempting +# to cleanup the next time the driver is used. Deferred deletion permanently +# deletes the device and all data stored in device will be lost. +# use_deferred_deletion = "True" + +# xfs_nospace_max_retries specifies the maximum number of retries XFS should +# attempt to complete IO when ENOSPC (no space) error is returned by +# underlying storage device. +# xfs_nospace_max_retries = "0" diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd new file mode 100644 index 00000000000..cc655c62e56 --- /dev/null +++ b/vendor/github.com/containers/storage/storage.conf-freebsd @@ -0,0 +1,205 @@ +# This file is is the configuration file for all tools +# that use the containers/storage library. The storage.conf file +# overrides all other storage.conf files. Container engines using the +# container/storage library do not inherit fields from other storage.conf +# files. 
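The remap-uids/remap-gids entries in storage.conf above are triples of container ID, host ID, and range length; they correspond to the idtools.IDMap values that the unshare code earlier in this diff writes into /proc/<pid>/uid_map. A small sketch of that correspondence:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// remap-uids = 0:1668442479:65536 in storage.conf corresponds to:
	m := idtools.IDMap{ContainerID: 0, HostID: 1668442479, Size: 65536}

	// ...which the unshare code writes to /proc/<pid>/uid_map in the
	// kernel's "inside outside length" format:
	fmt.Printf("%d %d %d\n", m.ContainerID, m.HostID, m.Size) // 0 1668442479 65536
}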
+# +# Note: The storage.conf file overrides other storage.conf files based on this precedence: +# /usr/containers/storage.conf +# /etc/containers/storage.conf +# $HOME/.config/containers/storage.conf +# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set) +# See man 5 containers-storage.conf for more information +# The "container storage" table contains all of the server options. +[storage] + +# Default Storage Driver, Must be set for proper operation. +driver = "zfs" + +# Temporary storage location +runroot = "/var/run/containers/storage" + +# Primary Read/Write location of container storage +graphroot = "/var/db/containers/storage" + + +# Storage path for rootless users +# +# rootless_storage_path = "$HOME/.local/share/containers/storage" + +[storage.options] +# Storage options to be passed to underlying storage drivers + +# AdditionalImageStores is used to pass paths to additional Read/Only image stores +# Must be comma separated list. +additionalimagestores = [ +] + +# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of +# a container, to the UIDs/GIDs as they should appear outside of the container, +# and the length of the range of UIDs/GIDs. Additional mapped sets can be +# listed and will be heeded by libraries, but there are limits to the number of +# mappings which the kernel will allow when you later attempt to run a +# container. +# +# remap-uids = 0:1668442479:65536 +# remap-gids = 0:1668442479:65536 + +# Remap-User/Group is a user name which can be used to look up one or more UID/GID +# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting +# with an in-container ID of 0 and then a host-level ID taken from the lowest +# range that matches the specified name, and using the length of that range. +# Additional ranges are then assigned, using the ranges which specify the +# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID, +# until all of the entries have been used for maps. +# +# remap-user = "containers" +# remap-group = "containers" + +# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID +# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned +# to containers configured to create automatically a user namespace. Containers +# configured to automatically create a user namespace can still overlap with containers +# having an explicit mapping set. +# This setting is ignored when running as rootless. +# root-auto-userns-user = "storage" +# +# Auto-userns-min-size is the minimum size for a user namespace created automatically. +# auto-userns-min-size=1024 +# +# Auto-userns-max-size is the minimum size for a user namespace created automatically. +# auto-userns-max-size=65536 + +[storage.options.overlay] +# ignore_chown_errors can be set to allow a non privileged user running with +# a single UID within a user namespace to run containers. The user can pull +# and use any image even those with multiple uids. Note multiple UIDs will be +# squashed down to the default uid in the container. These images will have no +# separation between the users in the container. Only supported for the overlay +# and vfs drivers. +#ignore_chown_errors = "false" + +# Inodes is used to set a maximum inodes of the container image. +# inodes = "" + +# Path to an helper program to use for mounting the file system instead of mounting it +# directly. 
+#mount_program = "/usr/bin/fuse-overlayfs" + +# mountopt specifies comma separated list of extra mount options +mountopt = "nodev" + +# Set to skip a PRIVATE bind mount on the storage home directory. +# skip_mount_home = "false" + +# Size is used to set a maximum size of the container image. +# size = "" + +# ForceMask specifies the permissions mask that is used for new files and +# directories. +# +# The values "shared" and "private" are accepted. +# Octal permission masks are also accepted. +# +# "": No value specified. +# All files/directories, get set with the permissions identified within the +# image. +# "private": it is equivalent to 0700. +# All files/directories get set with 0700 permissions. The owner has rwx +# access to the files. No other users on the system can access the files. +# This setting could be used with networked based homedirs. +# "shared": it is equivalent to 0755. +# The owner has rwx access to the files and everyone else can read, access +# and execute them. This setting is useful for sharing containers storage +# with other users. For instance have a storage owned by root but shared +# to rootless users as an additional store. +# NOTE: All files within the image are made readable and executable by any +# user on the system. Even /etc/shadow within your image is now readable by +# any user. +# +# OCTAL: Users can experiment with other OCTAL Permissions. +# +# Note: The force_mask Flag is an experimental feature, it could change in the +# future. When "force_mask" is set the original permission mask is stored in +# the "user.containers.override_stat" xattr and the "mount_program" option must +# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the +# extended attribute permissions to processes within containers rather then the +# "force_mask" permissions. +# +# force_mask = "" + +[storage.options.thinpool] +# Storage Options for thinpool + +# autoextend_percent determines the amount by which pool needs to be +# grown. This is specified in terms of % of pool size. So a value of 20 means +# that when threshold is hit, pool will be grown by 20% of existing +# pool size. +# autoextend_percent = "20" + +# autoextend_threshold determines the pool extension threshold in terms +# of percentage of pool size. For example, if threshold is 60, that means when +# pool is 60% full, threshold has been hit. +# autoextend_threshold = "80" + +# basesize specifies the size to use when creating the base device, which +# limits the size of images and containers. +# basesize = "10G" + +# blocksize specifies a custom blocksize to use for the thin pool. +# blocksize="64k" + +# directlvm_device specifies a custom block storage device to use for the +# thin pool. Required if you setup devicemapper. +# directlvm_device = "" + +# directlvm_device_force wipes device even if device already has a filesystem. +# directlvm_device_force = "True" + +# fs specifies the filesystem type to use for the base device. +# fs="xfs" + +# log_level sets the log level of devicemapper. +# 0: LogLevelSuppress 0 (Default) +# 2: LogLevelFatal +# 3: LogLevelErr +# 4: LogLevelWarn +# 5: LogLevelNotice +# 6: LogLevelInfo +# 7: LogLevelDebug +# log_level = "7" + +# min_free_space specifies the min free space percent in a thin pool require for +# new device creation to succeed. Valid values are from 0% - 99%. +# Value 0% disables +# min_free_space = "10%" + +# mkfsarg specifies extra mkfs arguments to be used when creating the base +# device. 
+# mkfsarg = "" + +# metadata_size is used to set the `pvcreate --metadatasize` options when +# creating thin devices. Default is 128k +# metadata_size = "" + +# Size is used to set a maximum size of the container image. +# size = "" + +# use_deferred_removal marks devicemapper block device for deferred removal. +# If the thinpool is in use when the driver attempts to remove it, the driver +# tells the kernel to remove it as soon as possible. Note this does not free +# up the disk space, use deferred deletion to fully remove the thinpool. +# use_deferred_removal = "True" + +# use_deferred_deletion marks thinpool device for deferred deletion. +# If the device is busy when the driver attempts to delete it, the driver +# will attempt to delete device every 30 seconds until successful. +# If the program using the driver exits, the driver will continue attempting +# to cleanup the next time the driver is used. Deferred deletion permanently +# deletes the device and all data stored in device will be lost. +# use_deferred_deletion = "True" + +# xfs_nospace_max_retries specifies the maximum number of retries XFS should +# attempt to complete IO when ENOSPC (no space) error is returned by +# underlying storage device. +# xfs_nospace_max_retries = "0" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go new file mode 100644 index 00000000000..30d3e8715ee --- /dev/null +++ b/vendor/github.com/containers/storage/store.go @@ -0,0 +1,3746 @@ +package storage + +import ( + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "time" + + // register all of the built-in drivers + _ "github.com/containers/storage/drivers/register" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/stringutils" + "github.com/containers/storage/pkg/system" + "github.com/containers/storage/types" + "github.com/hashicorp/go-multierror" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" +) + +type updateNameOperation int + +const ( + setNames updateNameOperation = iota + addNames + removeNames +) + +var ( + stores []*store + storesLock sync.Mutex +) + +// ROFileBasedStore wraps up the methods of the various types of file-based +// data stores that we implement which are needed for both read-only and +// read-write files. +type ROFileBasedStore interface { + Locker + + // Load reloads the contents of the store from disk. It should be called + // with the lock held. + Load() error + + // ReloadIfChanged reloads the contents of the store from disk if it is changed. + ReloadIfChanged() error +} + +// RWFileBasedStore wraps up the methods of various types of file-based data +// stores that we implement using read-write files. +type RWFileBasedStore interface { + // Save saves the contents of the store to disk. It should be called with + // the lock held, and Touch() should be called afterward before releasing the + // lock. + Save() error +} + +// FileBasedStore wraps up the common methods of various types of file-based +// data stores that we implement. 
+type FileBasedStore interface { + ROFileBasedStore + RWFileBasedStore +} + +// ROMetadataStore wraps a method for reading metadata associated with an ID. +type ROMetadataStore interface { + // Metadata reads metadata associated with an item with the specified ID. + Metadata(id string) (string, error) +} + +// RWMetadataStore wraps a method for setting metadata associated with an ID. +type RWMetadataStore interface { + // SetMetadata updates the metadata associated with the item with the specified ID. + SetMetadata(id, metadata string) error +} + +// MetadataStore wraps up methods for getting and setting metadata associated with IDs. +type MetadataStore interface { + ROMetadataStore + RWMetadataStore +} + +// An ROBigDataStore wraps up the read-only big-data related methods of the +// various types of file-based lookaside stores that we implement. +type ROBigDataStore interface { + // BigData retrieves a (potentially large) piece of data associated with + // this ID, if it has previously been set. + BigData(id, key string) ([]byte, error) + + // BigDataSize retrieves the size of a (potentially large) piece of + // data associated with this ID, if it has previously been set. + BigDataSize(id, key string) (int64, error) + + // BigDataDigest retrieves the digest of a (potentially large) piece of + // data associated with this ID, if it has previously been set. + BigDataDigest(id, key string) (digest.Digest, error) + + // BigDataNames() returns a list of the names of previously-stored pieces of + // data. + BigDataNames(id string) ([]string, error) +} + +// A RWImageBigDataStore wraps up how we store big-data associated with images. +type RWImageBigDataStore interface { + // SetBigData stores a (potentially large) piece of data associated + // with this ID. + // Pass github.com/containers/image/manifest.Digest as digestManifest + // to allow ByDigest to find images by their correct digests. + SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error +} + +// A ContainerBigDataStore wraps up how we store big-data associated with containers. +type ContainerBigDataStore interface { + ROBigDataStore + // SetBigData stores a (potentially large) piece of data associated + // with this ID. + SetBigData(id, key string, data []byte) error +} + +// A ROLayerBigDataStore wraps up how we store RO big-data associated with layers. +type ROLayerBigDataStore interface { + // SetBigData stores a (potentially large) piece of data associated + // with this ID. + BigData(id, key string) (io.ReadCloser, error) + + // BigDataNames() returns a list of the names of previously-stored pieces of + // data. + BigDataNames(id string) ([]string, error) +} + +// A RWLayerBigDataStore wraps up how we store big-data associated with layers. +type RWLayerBigDataStore interface { + // SetBigData stores a (potentially large) piece of data associated + // with this ID. + SetBigData(id, key string, data io.Reader) error +} + +// A LayerBigDataStore wraps up how we store big-data associated with layers. +type LayerBigDataStore interface { + ROLayerBigDataStore + RWLayerBigDataStore +} + +// A FlaggableStore can have flags set and cleared on items which it manages. +type FlaggableStore interface { + // ClearFlag removes a named flag from an item in the store. + ClearFlag(id string, flag string) error + + // SetFlag sets a named flag and its value on an item in the store. 
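The Load/ReloadIfChanged/Save contract above implies a fixed call pattern, visible later in this file in PutLayer: take the lock, reload if another process changed the file on disk, mutate, then save before the deferred unlock runs. A sketch of that pattern against the FileBasedStore interface (the mutation itself is elided):

package main

import "github.com/containers/storage"

// updateStore sketches the documented call pattern for a read-write
// file-based store.
func updateStore(fbs storage.FileBasedStore) error {
	fbs.Lock()
	defer fbs.Unlock()
	// Pick up changes other processes may have written since we last looked.
	if err := fbs.ReloadIfChanged(); err != nil {
		return err
	}
	// ... mutate the in-memory state here ...
	if err := fbs.Save(); err != nil {
		return err
	}
	// Per Save's doc comment, mark the store as changed while still locked.
	return fbs.Touch()
}

func main() {}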
+ SetFlag(id string, flag string, value interface{}) error +} + +type StoreOptions = types.StoreOptions + +// Store wraps up the various types of file-based stores that we use into a +// singleton object that initializes and manages them all together. +type Store interface { + // RunRoot, GraphRoot, GraphDriverName, and GraphOptions retrieve + // settings that were passed to GetStore() when the object was created. + RunRoot() string + GraphRoot() string + GraphDriverName() string + GraphOptions() []string + UIDMap() []idtools.IDMap + GIDMap() []idtools.IDMap + + // GraphDriver obtains and returns a handle to the graph Driver object used + // by the Store. + GraphDriver() (drivers.Driver, error) + + // CreateLayer creates a new layer in the underlying storage driver, + // optionally having the specified ID (one will be assigned if none is + // specified), with the specified layer (or no layer) as its parent, + // and with optional names. (The writeable flag is ignored.) + CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) + + // PutLayer combines the functions of CreateLayer and ApplyDiff, + // marking the layer for automatic removal if applying the diff fails + // for any reason. + // + // Note that we do some of this work in a child process. The calling + // process's main() function needs to import our pkg/reexec package and + // should begin with something like this in order to allow us to + // properly start that child process: + // if reexec.Init() { + // return + // } + PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) + + // CreateImage creates a new image, optionally with the specified ID + // (one will be assigned if none is specified), with optional names, + // referring to a specified image, and with optional metadata. An + // image is a record which associates the ID of a layer with a + // additional bookkeeping information which the library stores for the + // convenience of its caller. + CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) + + // CreateContainer creates a new container, optionally with the + // specified ID (one will be assigned if none is specified), with + // optional names, using the specified image's top layer as the basis + // for the container's layer, and assigning the specified ID to that + // layer (one will be created if none is specified). A container is a + // layer which is associated with additional bookkeeping information + // which the library stores for the convenience of its caller. + CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) + + // Metadata retrieves the metadata which is associated with a layer, + // image, or container (whichever the passed-in ID refers to). + Metadata(id string) (string, error) + + // SetMetadata updates the metadata which is associated with a layer, + // image, or container (whichever the passed-in ID refers to) to match + // the specified value. The metadata value can be retrieved at any + // time using Metadata, or using Layer, Image, or Container and reading + // the object directly. + SetMetadata(id, metadata string) error + + // Exists checks if there is a layer, image, or container which has the + // passed-in ID or name. 
+ Exists(id string) bool + + // Status asks for a status report, in the form of key-value pairs, + // from the underlying storage driver. The contents vary from driver + // to driver. + Status() ([][2]string, error) + + // Delete removes the layer, image, or container which has the + // passed-in ID or name. Note that no safety checks are performed, so + // this can leave images with references to layers which do not exist, + // and layers with references to parents which no longer exist. + Delete(id string) error + + // DeleteLayer attempts to remove the specified layer. If the layer is the + // parent of any other layer, or is referred to by any images, it will return + // an error. + DeleteLayer(id string) error + + // DeleteImage removes the specified image if it is not referred to by + // any containers. If its top layer is then no longer referred to by + // any other images and is not the parent of any other layers, its top + // layer will be removed. If that layer's parent is no longer referred + // to by any other images and is not the parent of any other layers, + // then it, too, will be removed. This procedure will be repeated + // until a layer which should not be removed, or the base layer, is + // reached, at which point the list of removed layers is returned. If + // the commit argument is false, the image and layers are not removed, + // but the list of layers which would be removed is still returned. + DeleteImage(id string, commit bool) (layers []string, err error) + + // DeleteContainer removes the specified container and its layer. If + // there is no matching container, or if the container exists but its + // layer does not, an error will be returned. + DeleteContainer(id string) error + + // Wipe removes all known layers, images, and containers. + Wipe() error + + // MountImage mounts an image to temp directory and returns the mount point. + // MountImage allows caller to mount an image. Images will always + // be mounted read/only + MountImage(id string, mountOptions []string, mountLabel string) (string, error) + + // Unmount attempts to unmount an image, given an ID. + // Returns whether or not the layer is still mounted. + UnmountImage(id string, force bool) (bool, error) + + // Mount attempts to mount a layer, image, or container for access, and + // returns the pathname if it succeeds. + // Note if the mountLabel == "", the default label for the container + // will be used. + // + // Note that we do some of this work in a child process. The calling + // process's main() function needs to import our pkg/reexec package and + // should begin with something like this in order to allow us to + // properly start that child process: + // if reexec.Init() { + // return + // } + Mount(id, mountLabel string) (string, error) + + // Unmount attempts to unmount a layer, image, or container, given an ID, a + // name, or a mount path. Returns whether or not the layer is still mounted. + Unmount(id string, force bool) (bool, error) + + // Mounted returns number of times the layer has been mounted. + Mounted(id string) (int, error) + + // Changes returns a summary of the changes which would need to be made + // to one layer to make its contents the same as a second layer. If + // the first layer is not specified, the second layer's parent is + // assumed. Each Change structure contains a Path relative to the + // layer's root directory, and a Kind which is either ChangeAdd, + // ChangeModify, or ChangeDelete. 
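Mount and Unmount above are reference-counted, and Mount may re-exec the calling binary, hence the reexec.Init note in the interface comment. A minimal sketch of the pairing, with a hypothetical container ID:

package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/containers/storage/pkg/reexec"
)

// mountOnce pairs Mount with a non-forced Unmount, which only decrements
// the mount count; containerID is a placeholder.
func mountOnce(s storage.Store, containerID string) error {
	// An empty mountLabel means the container's default label is used.
	mnt, err := s.Mount(containerID, "")
	if err != nil {
		return err
	}
	fmt.Println("mounted at:", mnt)

	stillMounted, err := s.Unmount(containerID, false)
	fmt.Println("still mounted:", stillMounted)
	return err
}

func main() {
	// Mount may re-exec the current binary, so main must start with this.
	if reexec.Init() {
		return
	}
}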
+ Changes(from, to string) ([]archive.Change, error) + + // DiffSize returns a count of the size of the tarstream which would + // specify the changes returned by Changes. + DiffSize(from, to string) (int64, error) + + // Diff returns the tarstream which would specify the changes returned + // by Changes. If options are passed in, they can override default + // behaviors. + Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) + + // ApplyDiff applies a tarstream to a layer. Information about the + // tarstream is cached with the layer. Typically, a layer which is + // populated using a tarstream will be expected to not be modified in + // any other way, either before or after the diff is applied. + // + // Note that we do some of this work in a child process. The calling + // process's main() function needs to import our pkg/reexec package and + // should begin with something like this in order to allow us to + // properly start that child process: + // if reexec.Init() { + // return + // } + ApplyDiff(to string, diff io.Reader) (int64, error) + + // ApplyDiffer applies a diff to a layer. + // It is the caller responsibility to clean the staging directory if it is not + // successfully applied with ApplyDiffFromStagingDirectory. + ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + + // ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff. + ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error + + // CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors + CleanupStagingDirectory(stagingDirectory string) error + + // DifferTarget gets the path to the differ target. + DifferTarget(id string) (string, error) + + // LayersByCompressedDigest returns a slice of the layers with the + // specified compressed digest value recorded for them. + LayersByCompressedDigest(d digest.Digest) ([]Layer, error) + + // LayersByUncompressedDigest returns a slice of the layers with the + // specified uncompressed digest value recorded for them. + LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) + + // LayerSize returns a cached approximation of the layer's size, or -1 + // if we don't have a value on hand. + LayerSize(id string) (int64, error) + + // LayerParentOwners returns the UIDs and GIDs of owners of parents of + // the layer's mountpoint for which the layer's UID and GID maps (if + // any are defined) don't contain corresponding IDs. + LayerParentOwners(id string) ([]int, []int, error) + + // Layers returns a list of the currently known layers. + Layers() ([]Layer, error) + + // Images returns a list of the currently known images. + Images() ([]Image, error) + + // Containers returns a list of the currently known containers. + Containers() ([]Container, error) + + // Names returns the list of names for a layer, image, or container. + Names(id string) ([]string, error) + + // Free removes the store from the list of stores + Free() + + // SetNames changes the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. + SetNames(id string, names []string) error + + // AddNames adds the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. 
+ AddNames(id string, names []string) error + + // RemoveNames removes the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. + RemoveNames(id string, names []string) error + + // ListImageBigData retrieves a list of the (possibly large) chunks of + // named data associated with an image. + ListImageBigData(id string) ([]string, error) + + // ImageBigData retrieves a (possibly large) chunk of named data + // associated with an image. + ImageBigData(id, key string) ([]byte, error) + + // ImageBigDataSize retrieves the size of a (possibly large) chunk + // of named data associated with an image. + ImageBigDataSize(id, key string) (int64, error) + + // ImageBigDataDigest retrieves the digest of a (possibly large) chunk + // of named data associated with an image. + ImageBigDataDigest(id, key string) (digest.Digest, error) + + // SetImageBigData stores a (possibly large) chunk of named data + // associated with an image. Pass + // github.com/containers/image/manifest.Digest as digestManifest to + // allow ImagesByDigest to find images by their correct digests. + SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error + + // ListLayerBigData retrieves a list of the (possibly large) chunks of + // named data associated with an layer. + ListLayerBigData(id string) ([]string, error) + + // LayerBigData retrieves a (possibly large) chunk of named data + // associated with a layer. + LayerBigData(id, key string) (io.ReadCloser, error) + + // SetLayerBigData stores a (possibly large) chunk of named data + // associated with a layer. + SetLayerBigData(id, key string, data io.Reader) error + + // ImageSize computes the size of the image's layers and ancillary data. + ImageSize(id string) (int64, error) + + // ListContainerBigData retrieves a list of the (possibly large) chunks of + // named data associated with a container. + ListContainerBigData(id string) ([]string, error) + + // ContainerBigData retrieves a (possibly large) chunk of named data + // associated with a container. + ContainerBigData(id, key string) ([]byte, error) + + // ContainerBigDataSize retrieves the size of a (possibly large) + // chunk of named data associated with a container. + ContainerBigDataSize(id, key string) (int64, error) + + // ContainerBigDataDigest retrieves the digest of a (possibly large) + // chunk of named data associated with a container. + ContainerBigDataDigest(id, key string) (digest.Digest, error) + + // SetContainerBigData stores a (possibly large) chunk of named data + // associated with a container. + SetContainerBigData(id, key string, data []byte) error + + // ContainerSize computes the size of the container's layer and ancillary + // data. Warning: this is a potentially expensive operation. + ContainerSize(id string) (int64, error) + + // Layer returns a specific layer. + Layer(id string) (*Layer, error) + + // Image returns a specific image. + Image(id string) (*Image, error) + + // ImagesByTopLayer returns a list of images which reference the specified + // layer as their top layer. They will have different IDs and names + // and may have different metadata, big data items, and flags. + ImagesByTopLayer(id string) ([]*Image, error) + + // ImagesByDigest returns a list of images which contain a big data item + // named ImageDigestBigDataKey whose contents have the specified digest. + ImagesByDigest(d digest.Digest) ([]*Image, error) + + // Container returns a specific container. 
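SetImageBigData's digestManifest parameter, described above, is typically the manifest.Digest function from containers/image, and storing the raw manifest under ImageDigestBigDataKey is what lets ImagesByDigest find the image later. A sketch:

package main

import (
	"github.com/containers/image/v5/manifest"
	"github.com/containers/storage"
)

// storeManifest passes manifest.Digest as the digestManifest callback, so
// that ImagesByDigest can later find the image by its manifest digest.
func storeManifest(s storage.Store, imageID string, rawManifest []byte) error {
	return s.SetImageBigData(imageID, storage.ImageDigestBigDataKey, rawManifest, manifest.Digest)
}

func main() {}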
+ Container(id string) (*Container, error) + + // ContainerByLayer returns a specific container based on its layer ID or + // name. + ContainerByLayer(id string) (*Container, error) + + // ContainerDirectory returns a path of a directory which the caller + // can use to store data, specific to the container, which the library + // does not directly manage. The directory will be deleted when the + // container is deleted. + ContainerDirectory(id string) (string, error) + + // SetContainerDirectoryFile is a convenience function which stores + // a piece of data in the specified file relative to the container's + // directory. + SetContainerDirectoryFile(id, file string, data []byte) error + + // FromContainerDirectory is a convenience function which reads + // the contents of the specified file relative to the container's + // directory. + FromContainerDirectory(id, file string) ([]byte, error) + + // ContainerRunDirectory returns a path of a directory which the + // caller can use to store data, specific to the container, which the + // library does not directly manage. The directory will be deleted + // when the host system is restarted. + ContainerRunDirectory(id string) (string, error) + + // SetContainerRunDirectoryFile is a convenience function which stores + // a piece of data in the specified file relative to the container's + // run directory. + SetContainerRunDirectoryFile(id, file string, data []byte) error + + // FromContainerRunDirectory is a convenience function which reads + // the contents of the specified file relative to the container's run + // directory. + FromContainerRunDirectory(id, file string) ([]byte, error) + + // ContainerParentOwners returns the UIDs and GIDs of owners of parents + // of the container's layer's mountpoint for which the layer's UID and + // GID maps (if any are defined) don't contain corresponding IDs. + ContainerParentOwners(id string) ([]int, []int, error) + + // Lookup returns the ID of a layer, image, or container with the specified + // name or ID. + Lookup(name string) (string, error) + + // Shutdown attempts to free any kernel resources which are being used + // by the underlying driver. If "force" is true, any mounted (i.e., in + // use) layers are unmounted beforehand. If "force" is not true, then + // layers being in use is considered to be an error condition. A list + // of still-mounted layers is returned along with possible errors. + Shutdown(force bool) (layers []string, err error) + + // Version returns version information, in the form of key-value pairs, from + // the storage package. + Version() ([][2]string, error) + + // GetDigestLock returns digest-specific Locker. + GetDigestLock(digest.Digest) (Locker, error) + + // LayerFromAdditionalLayerStore searches layers from the additional layer store and + // returns the object for handling this. Note that this hasn't been stored to this store + // yet so this needs to be done through PutAs method. + // Releasing AdditionalLayer handler is caller's responsibility. + // This API is experimental and can be changed without bumping the major version number. + LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error) +} + +// AdditionalLayer reprents a layer that is contained in the additional layer store +// This API is experimental and can be changed without bumping the major version number. +type AdditionalLayer interface { + // PutAs creates layer based on this handler, using diff contents from the additional + // layer store. 
+ PutAs(id, parent string, names []string) (*Layer, error) + + // UncompressedDigest returns the uncompressed digest of this layer + UncompressedDigest() digest.Digest + + // CompressedSize returns the compressed size of this layer + CompressedSize() int64 + + // Release tells the additional layer store that we don't use this handler. + Release() +} + +type AutoUserNsOptions = types.AutoUserNsOptions + +type IDMappingOptions = types.IDMappingOptions + +// LayerOptions is used for passing options to a Store's CreateLayer() and PutLayer() methods. +type LayerOptions struct { + // IDMappingOptions specifies the type of ID mapping which should be + // used for this layer. If nothing is specified, the layer will + // inherit settings from its parent layer or, if it has no parent + // layer, the Store object. + types.IDMappingOptions + // TemplateLayer is the ID of a layer whose contents will be used to + // initialize this layer. If set, it should be a child of the layer + // which we want to use as the parent of the new layer. + TemplateLayer string + // OriginalDigest specifies a digest of the tarstream (diff), if one is + // provided along with these LayerOptions, and reliably known by the caller. + // Use the default "" if this fields is not applicable or the value is not known. + OriginalDigest digest.Digest + // UncompressedDigest specifies a digest of the uncompressed version (“DiffID”) + // of the tarstream (diff), if one is provided along with these LayerOptions, + // and reliably known by the caller. + // Use the default "" if this fields is not applicable or the value is not known. + UncompressedDigest digest.Digest +} + +// ImageOptions is used for passing options to a Store's CreateImage() method. +type ImageOptions struct { + // CreationDate, if not zero, will override the default behavior of marking the image as having been + // created when CreateImage() was called, recording CreationDate instead. + CreationDate time.Time + // Digest is a hard-coded digest value that we can use to look up the image. It is optional. + Digest digest.Digest +} + +// ContainerOptions is used for passing options to a Store's CreateContainer() method. +type ContainerOptions struct { + // IDMappingOptions specifies the type of ID mapping which should be + // used for this container's layer. If nothing is specified, the + // container's layer will inherit settings from the image's top layer + // or, if it is not being created based on an image, the Store object. + types.IDMappingOptions + LabelOpts []string + Flags map[string]interface{} + MountOpts []string + Volatile bool + StorageOpt map[string]string +} + +type store struct { + lastLoaded time.Time + runRoot string + graphLock Locker + usernsLock Locker + graphRoot string + graphDriverName string + graphOptions []string + uidMap []idtools.IDMap + gidMap []idtools.IDMap + autoUsernsUser string + additionalUIDs *idSet // Set by getAvailableIDs() + additionalGIDs *idSet // Set by getAvailableIDs() + autoNsMinSize uint32 + autoNsMaxSize uint32 + graphDriver drivers.Driver + layerStore LayerStore + roLayerStores []ROLayerStore + imageStore ImageStore + roImageStores []ROImageStore + containerStore ContainerStore + digestLockRoot string + disableVolatile bool +} + +// GetStore attempts to find an already-created Store object matching the +// specified location and graph driver, and if it can't, it creates and +// initializes a new Store object, and the underlying storage that it controls. 
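LayerOptions above carries two optional digest hints for PutLayer: OriginalDigest for the tarstream as provided, and UncompressedDigest for its DiffID. A sketch with hypothetical inputs:

package main

import (
	"io"

	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

// putLayer supplies the optional digest hints when applying a diff;
// parentID, dgst, diffID, and diff are hypothetical inputs. An empty id
// asks the store to generate one.
func putLayer(s storage.Store, parentID string, dgst, diffID digest.Digest, diff io.Reader) (*storage.Layer, int64, error) {
	return s.PutLayer("", parentID, nil, "", true, &storage.LayerOptions{
		OriginalDigest:     dgst,   // digest of the tarstream as provided
		UncompressedDigest: diffID, // digest of the uncompressed diff ("DiffID")
	}, diff)
}

func main() {}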
+// +// If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used. +// +// These defaults observe environment variables: +// * `STORAGE_DRIVER` for the name of the storage driver to attempt to use +// * `STORAGE_OPTS` for the string of options to pass to the driver +// +// Note that we do some of this work in a child process. The calling process's +// main() function needs to import our pkg/reexec package and should begin with +// something like this in order to allow us to properly start that child +// process: +// if reexec.Init() { +// return +// } +func GetStore(options types.StoreOptions) (Store, error) { + if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 { + options = types.Options() + } + + if options.GraphRoot != "" { + dir, err := filepath.Abs(options.GraphRoot) + if err != nil { + return nil, err + } + options.GraphRoot = dir + } + if options.RunRoot != "" { + dir, err := filepath.Abs(options.RunRoot) + if err != nil { + return nil, err + } + options.RunRoot = dir + } + + storesLock.Lock() + defer storesLock.Unlock() + + // return if BOTH run and graph root are matched, otherwise our run-root can be overridden if the graph is found first + for _, s := range stores { + if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { + return s, nil + } + } + + // if passed a run-root or graph-root alone, the other should be defaulted only error if we have neither. + switch { + case options.RunRoot == "" && options.GraphRoot == "": + return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot or graphroot specified") + case options.GraphRoot == "": + options.GraphRoot = types.Options().GraphRoot + case options.RunRoot == "": + options.RunRoot = types.Options().RunRoot + } + + if err := os.MkdirAll(options.RunRoot, 0700); err != nil { + return nil, err + } + if err := os.MkdirAll(options.GraphRoot, 0700); err != nil { + return nil, err + } + for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} { + if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil { + return nil, err + } + } + + graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock")) + if err != nil { + return nil, err + } + + usernsLock, err := GetLockfile(filepath.Join(options.GraphRoot, "userns.lock")) + if err != nil { + return nil, err + } + + autoNsMinSize := options.AutoNsMinSize + autoNsMaxSize := options.AutoNsMaxSize + if autoNsMinSize == 0 { + autoNsMinSize = AutoUserNsMinSize + } + if autoNsMaxSize == 0 { + autoNsMaxSize = AutoUserNsMaxSize + } + s := &store{ + runRoot: options.RunRoot, + graphLock: graphLock, + graphRoot: options.GraphRoot, + graphDriverName: options.GraphDriverName, + graphOptions: options.GraphDriverOptions, + uidMap: copyIDMap(options.UIDMap), + gidMap: copyIDMap(options.GIDMap), + autoUsernsUser: options.RootAutoNsUser, + autoNsMinSize: autoNsMinSize, + autoNsMaxSize: autoNsMaxSize, + additionalUIDs: nil, + additionalGIDs: nil, + usernsLock: usernsLock, + disableVolatile: options.DisableVolatile, + } + if err := s.load(); err != nil { + return nil, err + } + + stores = append(stores, s) + + return s, nil +} + +func copyUint32Slice(slice []uint32) []uint32 { + m := []uint32{} + if slice != nil { + m = make([]uint32, len(slice)) + copy(m, slice) + } + if len(m) > 0 { + return m[:] + } + return nil +} + +func copyIDMap(idmap 
[]idtools.IDMap) []idtools.IDMap { + m := []idtools.IDMap{} + if idmap != nil { + m = make([]idtools.IDMap, len(idmap)) + copy(m, idmap) + } + if len(m) > 0 { + return m[:] + } + return nil +} + +func (s *store) RunRoot() string { + return s.runRoot +} + +func (s *store) GraphDriverName() string { + return s.graphDriverName +} + +func (s *store) GraphRoot() string { + return s.graphRoot +} + +func (s *store) GraphOptions() []string { + return s.graphOptions +} + +func (s *store) UIDMap() []idtools.IDMap { + return copyIDMap(s.uidMap) +} + +func (s *store) GIDMap() []idtools.IDMap { + return copyIDMap(s.gidMap) +} + +func (s *store) load() error { + driver, err := s.GraphDriver() + if err != nil { + return err + } + s.graphDriver = driver + s.graphDriverName = driver.String() + driverPrefix := s.graphDriverName + "-" + + gipath := filepath.Join(s.graphRoot, driverPrefix+"images") + if err := os.MkdirAll(gipath, 0700); err != nil { + return err + } + ris, err := newImageStore(gipath) + if err != nil { + return err + } + s.imageStore = ris + if _, err := s.ROImageStores(); err != nil { + return err + } + + gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers") + if err := os.MkdirAll(gcpath, 0700); err != nil { + return err + } + rcs, err := newContainerStore(gcpath) + if err != nil { + return err + } + rcpath := filepath.Join(s.runRoot, driverPrefix+"containers") + if err := os.MkdirAll(rcpath, 0700); err != nil { + return err + } + s.containerStore = rcs + + for _, store := range driver.AdditionalImageStores() { + gipath := filepath.Join(store, driverPrefix+"images") + ris, err := newROImageStore(gipath) + if err != nil { + return err + } + s.roImageStores = append(s.roImageStores, ris) + } + + s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks") + if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil { + return err + } + + return nil +} + +// GetDigestLock returns a digest-specific Locker. +func (s *store) GetDigestLock(d digest.Digest) (Locker, error) { + return GetLockfile(filepath.Join(s.digestLockRoot, d.String())) +} + +func (s *store) getGraphDriver() (drivers.Driver, error) { + if s.graphDriver != nil { + return s.graphDriver, nil + } + config := drivers.Options{ + Root: s.graphRoot, + RunRoot: s.runRoot, + DriverOptions: s.graphOptions, + UIDMaps: s.uidMap, + GIDMaps: s.gidMap, + } + driver, err := drivers.New(s.graphDriverName, config) + if err != nil { + return nil, err + } + s.graphDriver = driver + s.graphDriverName = driver.String() + return driver, nil +} + +func (s *store) GraphDriver() (drivers.Driver, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.graphLock.TouchedSince(s.lastLoaded) { + s.graphDriver = nil + s.layerStore = nil + s.lastLoaded = time.Now() + } + return s.getGraphDriver() +} + +// LayerStore obtains and returns a handle to the writeable layer store object +// used by the Store. Accessing this store directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. 
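Putting GetStore's doc comment above into practice: reexec.Init must run at the top of main, and any options left unset fall back to defaults. An illustrative sketch using throwaway paths and the vfs driver, which needs no kernel support:

package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/containers/storage/pkg/reexec"
	"github.com/containers/storage/types"
)

func main() {
	// As the GetStore doc comment requires: let a re-exec'd child take
	// over before doing anything else.
	if reexec.Init() {
		return
	}
	// Paths and driver are illustrative.
	s, err := storage.GetStore(types.StoreOptions{
		RunRoot:         "/tmp/storage-run",
		GraphRoot:       "/tmp/storage-graph",
		GraphDriverName: "vfs",
	})
	if err != nil {
		panic(err)
	}
	// force=false treats still-mounted layers as an error condition.
	defer s.Shutdown(false)

	status, err := s.Status()
	if err != nil {
		panic(err)
	}
	fmt.Println("driver status:", status)
}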
+func (s *store) LayerStore() (LayerStore, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.graphLock.TouchedSince(s.lastLoaded) { + s.graphDriver = nil + s.layerStore = nil + s.lastLoaded = time.Now() + } + if s.layerStore != nil { + return s.layerStore, nil + } + driver, err := s.getGraphDriver() + if err != nil { + return nil, err + } + driverPrefix := s.graphDriverName + "-" + rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") + if err := os.MkdirAll(rlpath, 0700); err != nil { + return nil, err + } + glpath := filepath.Join(s.graphRoot, driverPrefix+"layers") + if err := os.MkdirAll(glpath, 0700); err != nil { + return nil, err + } + rls, err := s.newLayerStore(rlpath, glpath, driver) + if err != nil { + return nil, err + } + s.layerStore = rls + return s.layerStore, nil +} + +// ROLayerStores obtains additional read/only layer store objects used by the +// Store. Accessing these stores directly will bypass locking and +// synchronization, so it is not part of the exported Store interface. +func (s *store) ROLayerStores() ([]ROLayerStore, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.roLayerStores != nil { + return s.roLayerStores, nil + } + driver, err := s.getGraphDriver() + if err != nil { + return nil, err + } + driverPrefix := s.graphDriverName + "-" + rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") + if err := os.MkdirAll(rlpath, 0700); err != nil { + return nil, err + } + for _, store := range driver.AdditionalImageStores() { + glpath := filepath.Join(store, driverPrefix+"layers") + rls, err := newROLayerStore(rlpath, glpath, driver) + if err != nil { + return nil, err + } + s.roLayerStores = append(s.roLayerStores, rls) + } + return s.roLayerStores, nil +} + +// ImageStore obtains and returns a handle to the writable image store object +// used by the Store. Accessing this store directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. +func (s *store) ImageStore() (ImageStore, error) { + if s.imageStore != nil { + return s.imageStore, nil + } + return nil, ErrLoadError +} + +// ROImageStores obtains additional read/only image store objects used by the +// Store. Accessing these stores directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. +func (s *store) ROImageStores() ([]ROImageStore, error) { + if s.imageStore == nil { + return nil, ErrLoadError + } + + return s.roImageStores, nil +} + +// ContainerStore obtains and returns a handle to the container store object +// used by the Store. Accessing this store directly will bypass locking and +// synchronization, so it is not a part of the exported Store interface. 
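+// Unlike layers and images, there is a single writable container store and no
+// read-only variants, so callers lock only this one handle, for example:
+//	rcstore, err := s.ContainerStore()
+//	rcstore.RLock()
+//	defer rcstore.Unlock()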
+func (s *store) ContainerStore() (ContainerStore, error) { + if s.containerStore != nil { + return s.containerStore, nil + } + return nil, ErrLoadError +} + +func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool { + if s.graphDriver == nil || !s.graphDriver.SupportsShifting() { + return false + } + if uidmap != nil && !idtools.IsContiguous(uidmap) { + return false + } + if gidmap != nil && !idtools.IsContiguous(gidmap) { + return false + } + return true +} + +func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) { + var parentLayer *Layer + rlstore, err := s.LayerStore() + if err != nil { + return nil, -1, err + } + rlstores, err := s.ROLayerStores() + if err != nil { + return nil, -1, err + } + rcstore, err := s.ContainerStore() + if err != nil { + return nil, -1, err + } + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, -1, err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, -1, err + } + if id == "" { + id = stringid.GenerateRandomID() + } + if options == nil { + options = &LayerOptions{} + } + if options.HostUIDMapping { + options.UIDMap = nil + } + if options.HostGIDMapping { + options.GIDMap = nil + } + uidMap := options.UIDMap + gidMap := options.GIDMap + if parent != "" { + var ilayer *Layer + for _, l := range append([]ROLayerStore{rlstore}, rlstores...) { + lstore := l + if lstore != rlstore { + lstore.RLock() + defer lstore.Unlock() + if err := lstore.ReloadIfChanged(); err != nil { + return nil, -1, err + } + } + if l, err := lstore.Get(parent); err == nil && l != nil { + ilayer = l + parent = ilayer.ID + break + } + } + if ilayer == nil { + return nil, -1, ErrLayerUnknown + } + parentLayer = ilayer + containers, err := rcstore.Containers() + if err != nil { + return nil, -1, err + } + for _, container := range containers { + if container.LayerID == parent { + return nil, -1, ErrParentIsContainer + } + } + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = ilayer.UIDMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = ilayer.GIDMap + } + } else { + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = s.uidMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = s.gidMap + } + } + layerOptions := LayerOptions{ + OriginalDigest: options.OriginalDigest, + UncompressedDigest: options.UncompressedDigest, + } + if s.canUseShifting(uidMap, gidMap) { + layerOptions.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil} + } else { + layerOptions.IDMappingOptions = types.IDMappingOptions{ + HostUIDMapping: options.HostUIDMapping, + HostGIDMapping: options.HostGIDMapping, + UIDMap: copyIDMap(uidMap), + GIDMap: copyIDMap(gidMap), + } + } + return rlstore.Put(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, nil, diff) +} + +func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) { + layer, _, err := s.PutLayer(id, parent, names, mountLabel, writeable, options, nil) + return layer, err +} + +func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) { + if id == "" { + id = stringid.GenerateRandomID() + } + + if layer != "" { + lstore, err := s.LayerStore() + if err != 
nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + var ilayer *Layer + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + if store == lstore { + store.Lock() + } else { + store.RLock() + } + defer store.Unlock() + err := store.ReloadIfChanged() + if err != nil { + return nil, err + } + ilayer, err = store.Get(layer) + if err == nil { + break + } + } + if ilayer == nil { + return nil, ErrLayerUnknown + } + layer = ilayer.ID + } + + ristore, err := s.ImageStore() + if err != nil { + return nil, err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return nil, err + } + + creationDate := time.Now().UTC() + if options != nil && !options.CreationDate.IsZero() { + creationDate = options.CreationDate + } + + return ristore.Create(id, names, layer, metadata, creationDate, options.Digest) +} + +func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, createMappedLayer bool, rlstore LayerStore, lstores []ROLayerStore, options types.IDMappingOptions) (*Layer, error) { + layerMatchesMappingOptions := func(layer *Layer, options types.IDMappingOptions) bool { + // If the driver supports shifting and the layer has no mappings, we can use it. + if s.canUseShifting(options.UIDMap, options.GIDMap) && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 { + return true + } + // If we want host mapping, and the layer uses mappings, it's not the best match. + if options.HostUIDMapping && len(layer.UIDMap) != 0 { + return false + } + if options.HostGIDMapping && len(layer.GIDMap) != 0 { + return false + } + // Compare the maps. + return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap) + } + var layer, parentLayer *Layer + allStores := append([]ROLayerStore{rlstore}, lstores...) + // Locate the image's top layer and its parent, if it has one. + for _, s := range allStores { + store := s + if store != rlstore { + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + } + // Walk the top layer list. + for _, candidate := range append([]string{image.TopLayer}, image.MappedTopLayers...) { + if cLayer, err := store.Get(candidate); err == nil { + // We want the layer's parent, too, if it has one. + var cParentLayer *Layer + if cLayer.Parent != "" { + // Its parent should be in one of the stores, somewhere. + for _, ps := range allStores { + if cParentLayer, err = ps.Get(cLayer.Parent); err == nil { + break + } + } + if cParentLayer == nil { + continue + } + } + // If the layer matches the desired mappings, it's a perfect match, + // so we're actually done here. + if layerMatchesMappingOptions(cLayer, options) { + return cLayer, nil + } + // Record the first one that we found, even if it's not ideal, so that + // we have a starting point. + if layer == nil { + layer = cLayer + parentLayer = cParentLayer + } + } + } + } + if layer == nil { + return nil, ErrLayerUnknown + } + // The top layer's mappings don't match the ones we want, but it's in a read-only + // image store, so we can't create and add a mapped copy of the layer to the image. + // We'll have to do the mapping for the container itself, elsewhere. + if !createMappedLayer { + return layer, nil + } + // The top layer's mappings don't match the ones we want, and it's in an image store + // that lets us edit image metadata... + if istore, ok := ristore.(*imageStore); ok { + // ... 
so create a duplicate of the layer with the desired mappings, and + // register it as an alternate top layer in the image. + var layerOptions LayerOptions + if s.canUseShifting(options.UIDMap, options.GIDMap) { + layerOptions = LayerOptions{ + IDMappingOptions: types.IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + UIDMap: nil, + GIDMap: nil, + }, + } + } else { + layerOptions = LayerOptions{ + IDMappingOptions: types.IDMappingOptions{ + HostUIDMapping: options.HostUIDMapping, + HostGIDMapping: options.HostGIDMapping, + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + }, + } + } + layerOptions.TemplateLayer = layer.ID + mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil) + if err != nil { + return nil, errors.Wrapf(err, "error creating an ID-mapped copy of layer %q", layer.ID) + } + if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil { + if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil { + err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2)) + } + return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID) + } + layer = mappedLayer + } + return layer, nil +} + +func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { + if options == nil { + options = &ContainerOptions{} + } + if options.HostUIDMapping { + options.UIDMap = nil + } + if options.HostGIDMapping { + options.GIDMap = nil + } + rlstore, err := s.LayerStore() + if err != nil { + return nil, err + } + if id == "" { + id = stringid.GenerateRandomID() + } + + var imageTopLayer *Layer + imageID := "" + + if options.AutoUserNs || options.UIDMap != nil || options.GIDMap != nil { + // Prevent multiple instances from retrieving the same range when AutoUserNs + // is used. + // It doesn't prevent containers that specify an explicit mapping from overlapping + // with AutoUserNs. + s.usernsLock.Lock() + defer s.usernsLock.Unlock() + } + + var imageHomeStore ROImageStore + var istore ImageStore + var istores []ROImageStore + var lstores []ROLayerStore + var cimage *Image + if image != "" { + var err error + lstores, err = s.ROLayerStores() + if err != nil { + return nil, err + } + istore, err = s.ImageStore() + if err != nil { + return nil, err + } + istores, err = s.ROImageStores() + if err != nil { + return nil, err + } + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...)
{ + store := s + if store == istore { + store.Lock() + } else { + store.RLock() + } + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + cimage, err = store.Get(image) + if err == nil { + imageHomeStore = store + break + } + } + if cimage == nil { + return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + imageID = cimage.ID + } + + if options.AutoUserNs { + var err error + options.UIDMap, options.GIDMap, err = s.getAutoUserNS(id, &options.AutoUserNsOpts, cimage) + if err != nil { + return nil, err + } + } + + uidMap := options.UIDMap + gidMap := options.GIDMap + + idMappingsOptions := options.IDMappingOptions + if image != "" { + if cimage.TopLayer != "" { + createMappedLayer := imageHomeStore == istore + ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, createMappedLayer, rlstore, lstores, idMappingsOptions) + if err != nil { + return nil, err + } + imageTopLayer = ilayer + + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = ilayer.UIDMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = ilayer.GIDMap + } + } + } else { + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, err + } + if !options.HostUIDMapping && len(options.UIDMap) == 0 { + uidMap = s.uidMap + } + if !options.HostGIDMapping && len(options.GIDMap) == 0 { + gidMap = s.gidMap + } + } + var layerOptions *LayerOptions + if s.canUseShifting(uidMap, gidMap) { + layerOptions = &LayerOptions{ + IDMappingOptions: types.IDMappingOptions{ + HostUIDMapping: true, + HostGIDMapping: true, + UIDMap: nil, + GIDMap: nil, + }, + } + } else { + layerOptions = &LayerOptions{ + IDMappingOptions: types.IDMappingOptions{ + HostUIDMapping: idMappingsOptions.HostUIDMapping, + HostGIDMapping: idMappingsOptions.HostGIDMapping, + UIDMap: copyIDMap(uidMap), + GIDMap: copyIDMap(gidMap), + }, + } + } + if options.Flags == nil { + options.Flags = make(map[string]interface{}) + } + plabel, _ := options.Flags["ProcessLabel"].(string) + mlabel, _ := options.Flags["MountLabel"].(string) + if (plabel == "" && mlabel != "") || + (plabel != "" && mlabel == "") { + return nil, errors.Errorf("ProcessLabel and Mountlabel must either not be specified or both specified") + } + + if plabel == "" { + processLabel, mountLabel, err := label.InitLabels(options.LabelOpts) + if err != nil { + return nil, err + } + options.Flags["ProcessLabel"] = processLabel + options.Flags["MountLabel"] = mountLabel + } + + clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), options.StorageOpt, layerOptions, true) + if err != nil { + return nil, err + } + layer = clayer.ID + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, err + } + options.IDMappingOptions = types.IDMappingOptions{ + HostUIDMapping: len(options.UIDMap) == 0, + HostGIDMapping: len(options.GIDMap) == 0, + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + } + container, err := rcstore.Create(id, names, imageID, layer, metadata, options) + if err != nil || container == nil { + rlstore.Delete(layer) + } + return container, err +} + +func (s *store) SetMetadata(id, metadata string) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + ristore, err := s.ImageStore() + if err != nil { + return err + } + rcstore, err := 
s.ContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + + if rlstore.Exists(id) { + return rlstore.SetMetadata(id, metadata) + } + if ristore.Exists(id) { + return ristore.SetMetadata(id, metadata) + } + if rcstore.Exists(id) { + return rcstore.SetMetadata(id, metadata) + } + return ErrNotAnID +} + +func (s *store) Metadata(id string) (string, error) { + lstore, err := s.LayerStore() + if err != nil { + return "", err + } + lstores, err := s.ROLayerStores() + if err != nil { + return "", err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return "", err + } + if store.Exists(id) { + return store.Metadata(id) + } + } + + istore, err := s.ImageStore() + if err != nil { + return "", err + } + istores, err := s.ROImageStores() + if err != nil { + return "", err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return "", err + } + if store.Exists(id) { + return store.Metadata(id) + } + } + + cstore, err := s.ContainerStore() + if err != nil { + return "", err + } + cstore.RLock() + defer cstore.Unlock() + if err := cstore.ReloadIfChanged(); err != nil { + return "", err + } + if cstore.Exists(id) { + return cstore.Metadata(id) + } + return "", ErrNotAnID +} + +func (s *store) ListImageBigData(id string) ([]string, error) { + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + bigDataNames, err := store.BigDataNames(id) + if err == nil { + return bigDataNames, err + } + } + return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +func (s *store) ImageBigDataSize(id, key string) (int64, error) { + istore, err := s.ImageStore() + if err != nil { + return -1, err + } + istores, err := s.ROImageStores() + if err != nil { + return -1, err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return -1, err + } + size, err := store.BigDataSize(id, key) + if err == nil { + return size, nil + } + } + return -1, ErrSizeUnknown +} + +func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { + ristore, err := s.ImageStore() + if err != nil { + return "", err + } + stores, err := s.ROImageStores() + if err != nil { + return "", err + } + stores = append([]ROImageStore{ristore}, stores...) 
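+ // Probe the writable store first, then each read-only store, and return
+ // the first digest that is present and also passes validation.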
+ for _, r := range stores { + ristore := r + ristore.RLock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return "", err + } + d, err := ristore.BigDataDigest(id, key) + if err == nil && d.Validate() == nil { + return d, nil + } + } + return "", ErrDigestUnknown +} + +func (s *store) ImageBigData(id, key string) ([]byte, error) { + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + foundImage := false + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + data, err := store.BigData(id, key) + if err == nil { + return data, nil + } + if store.Exists(id) { + foundImage = true + } + } + if foundImage { + return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q (consider removing the image to resolve the issue)", key, id) + } + return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) +} + +// ListLayerBigData retrieves a list of the (possibly large) chunks of +// named data associated with a layer. +func (s *store) ListLayerBigData(id string) ([]string, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + foundLayer := false + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + data, err := store.BigDataNames(id) + if err == nil { + return data, nil + } + if store.Exists(id) { + foundLayer = true + } + } + if foundLayer { + return nil, errors.Wrapf(os.ErrNotExist, "error locating big data for layer with ID %q", id) + } + return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id) +} + +// LayerBigData retrieves a (possibly large) chunk of named data +// associated with a layer. +func (s *store) LayerBigData(id, key string) (io.ReadCloser, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + foundLayer := false + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + data, err := store.BigData(id, key) + if err == nil { + return data, nil + } + if store.Exists(id) { + foundLayer = true + } + } + if foundLayer { + return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for layer with ID %q", key, id) + } + return nil, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", id) +} + +// SetLayerBigData stores a (possibly large) chunk of named data +// associated with a layer.
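+// A sketch of a round-trip, assuming layerID names an existing layer and using
+// a hypothetical key (strings.NewReader supplies the io.Reader):
+//	err := store.SetLayerBigData(layerID, "annotations", strings.NewReader("{}"))
+//	rc, err := store.LayerBigData(layerID, "annotations") // read it back; close rc when done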
+func (s *store) SetLayerBigData(id, key string, data io.Reader) error { + store, err := s.LayerStore() + if err != nil { + return err + } + + store.Lock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return err + } + return store.SetBigData(id, key, data) +} + +func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error { + ristore, err := s.ImageStore() + if err != nil { + return err + } + + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + + return ristore.SetBigData(id, key, data, digestManifest) +} + +func (s *store) ImageSize(id string) (int64, error) { + var image *Image + + lstore, err := s.LayerStore() + if err != nil { + return -1, errors.Wrapf(err, "error loading primary layer store data") + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, errors.Wrapf(err, "error loading additional layer stores") + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return -1, err + } + } + + var imageStore ROBigDataStore + istore, err := s.ImageStore() + if err != nil { + return -1, errors.Wrapf(err, "error loading primary image store data") + } + istores, err := s.ROImageStores() + if err != nil { + return -1, errors.Wrapf(err, "error loading additional image stores") + } + + // Look for the image's record. + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return -1, err + } + if image, err = store.Get(id); err == nil { + imageStore = store + break + } + } + if image == nil { + return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + + // Start with a list of the image's top layers, if it has any. + queue := make(map[string]struct{}) + for _, layerID := range append([]string{image.TopLayer}, image.MappedTopLayers...) { + if layerID != "" { + queue[layerID] = struct{}{} + } + } + visited := make(map[string]struct{}) + // Walk all of the layers. + var size int64 + for len(visited) < len(queue) { + for layerID := range queue { + // Visit each layer only once. + if _, ok := visited[layerID]; ok { + continue + } + visited[layerID] = struct{}{} + // Look for the layer and the store that knows about it. + var layerStore ROLayerStore + var layer *Layer + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + if layer, err = store.Get(layerID); err == nil { + layerStore = store + break + } + } + if layer == nil { + return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID) + } + // The UncompressedSize is only valid if there's a digest to go with it. + n := layer.UncompressedSize + if layer.UncompressedDigest == "" { + // Compute the size. + n, err = layerStore.DiffSize("", layer.ID) + if err != nil { + return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID) + } + } + // Count this layer. + size += n + // Make a note to visit the layer's parent if we haven't already. + if layer.Parent != "" { + queue[layer.Parent] = struct{}{} + } + } + } + + // Count big data items. 
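+ // Image "big data" holds named blobs recorded alongside the image (such as a
+ // manifest or config); their sizes are added on top of the layer total above.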
+ names, err := imageStore.BigDataNames(id) + if err != nil { + return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id) + } + for _, name := range names { + n, err := imageStore.BigDataSize(id, name) + if err != nil { + return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id) + } + size += n + } + + return size, nil +} + +func (s *store) ContainerSize(id string) (int64, error) { + lstore, err := s.LayerStore() + if err != nil { + return -1, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return -1, err + } + } + + // Get the location of the container directory and container run directory. + // Do it before we lock the container store because they do, too. + cdir, err := s.ContainerDirectory(id) + if err != nil { + return -1, err + } + rdir, err := s.ContainerRunDirectory(id) + if err != nil { + return -1, err + } + + rcstore, err := s.ContainerStore() + if err != nil { + return -1, err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return -1, err + } + + // Read the container record. + container, err := rcstore.Get(id) + if err != nil { + return -1, err + } + + // Read the container's layer's size. + var layer *Layer + var size int64 + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + if layer, err = store.Get(container.LayerID); err == nil { + size, err = store.DiffSize("", layer.ID) + if err != nil { + return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID) + } + break + } + } + if layer == nil { + return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID) + } + + // Count big data items. + names, err := rcstore.BigDataNames(id) + if err != nil { + return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID) + } + for _, name := range names { + n, err := rcstore.BigDataSize(id, name) + if err != nil { + return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id) + } + size += n + } + + // Count the size of our container directory and container run directory. 
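+ // cdir and rdir were resolved earlier, before taking the container store
+ // lock, because ContainerDirectory and ContainerRunDirectory lock it themselves.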
+ n, err := directory.Size(cdir) + if err != nil { + return -1, err + } + size += n + n, err = directory.Size(rdir) + if err != nil { + return -1, err + } + size += n + + return size, nil +} + +func (s *store) ListContainerBigData(id string) ([]string, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, err + } + + return rcstore.BigDataNames(id) +} + +func (s *store) ContainerBigDataSize(id, key string) (int64, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return -1, err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return -1, err + } + return rcstore.BigDataSize(id, key) +} + +func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return "", err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return "", err + } + return rcstore.BigDataDigest(id, key) +} + +func (s *store) ContainerBigData(id, key string) ([]byte, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, err + } + return rcstore.BigData(id, key) +} + +func (s *store) SetContainerBigData(id, key string, data []byte) error { + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + return rcstore.SetBigData(id, key, data) +} + +func (s *store) Exists(id string) bool { + lstore, err := s.LayerStore() + if err != nil { + return false + } + lstores, err := s.ROLayerStores() + if err != nil { + return false + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return false + } + if store.Exists(id) { + return true + } + } + + istore, err := s.ImageStore() + if err != nil { + return false + } + istores, err := s.ROImageStores() + if err != nil { + return false + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return false + } + if store.Exists(id) { + return true + } + } + + rcstore, err := s.ContainerStore() + if err != nil { + return false + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return false + } + if rcstore.Exists(id) { + return true + } + + return false +} + +func dedupeNames(names []string) []string { + seen := make(map[string]bool) + deduped := make([]string, 0, len(names)) + for _, name := range names { + if _, wasSeen := seen[name]; !wasSeen { + seen[name] = true + deduped = append(deduped, name) + } + } + return deduped +} + +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. 
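+// A sketch of the replacement calls, with hypothetical names:
+//	err := store.AddNames(id, []string{"registry.example.com/repo:new"})
+//	err = store.RemoveNames(id, []string{"registry.example.com/repo:old"})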
+func (s *store) SetNames(id string, names []string) error { + return s.updateNames(id, names, setNames) +} + +func (s *store) AddNames(id string, names []string) error { + return s.updateNames(id, names, addNames) +} + +func (s *store) RemoveNames(id string, names []string) error { + return s.updateNames(id, names, removeNames) +} + +func (s *store) updateNames(id string, names []string, op updateNameOperation) error { + deduped := dedupeNames(names) + + rlstore, err := s.LayerStore() + if err != nil { + return err + } + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return err + } + if rlstore.Exists(id) { + switch op { + case setNames: + return rlstore.SetNames(id, deduped) + case removeNames: + return rlstore.RemoveNames(id, deduped) + case addNames: + return rlstore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } + } + + ristore, err := s.ImageStore() + if err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + if ristore.Exists(id) { + switch op { + case setNames: + return ristore.SetNames(id, deduped) + case removeNames: + return ristore.RemoveNames(id, deduped) + case addNames: + return ristore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } + } + + // Check if id refers to a read-only image store + ristores, err := s.ROImageStores() + if err != nil { + return err + } + for _, s := range ristores { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return err + } + if i, err := store.Get(id); err == nil { + if len(deduped) > 1 { + // We don't want to create the image name in R/W storage + deduped = deduped[1:] + } + _, err := ristore.Create(id, deduped, i.TopLayer, i.Metadata, i.Created, i.Digest) + if err == nil { + return ristore.Save() + } + return err + } + } + + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + if rcstore.Exists(id) { + switch op { + case setNames: + return rcstore.SetNames(id, deduped) + case removeNames: + return rcstore.RemoveNames(id, deduped) + case addNames: + return rcstore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } + } + return ErrLayerUnknown +} + +func (s *store) Names(id string) ([]string, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + if l, err := store.Get(id); l != nil && err == nil { + return l.Names, nil + } + } + + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...)
{ + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + if i, err := store.Get(id); i != nil && err == nil { + return i.Names, nil + } + } + + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, err + } + if c, err := rcstore.Get(id); c != nil && err == nil { + return c.Names, nil + } + return nil, ErrLayerUnknown +} + +func (s *store) Lookup(name string) (string, error) { + lstore, err := s.LayerStore() + if err != nil { + return "", err + } + lstores, err := s.ROLayerStores() + if err != nil { + return "", err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return "", err + } + if l, err := store.Get(name); l != nil && err == nil { + return l.ID, nil + } + } + + istore, err := s.ImageStore() + if err != nil { + return "", err + } + istores, err := s.ROImageStores() + if err != nil { + return "", err + } + for _, s := range append([]ROImageStore{istore}, istores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return "", err + } + if i, err := store.Get(name); i != nil && err == nil { + return i.ID, nil + } + } + + cstore, err := s.ContainerStore() + if err != nil { + return "", err + } + cstore.RLock() + defer cstore.Unlock() + if err := cstore.ReloadIfChanged(); err != nil { + return "", err + } + if c, err := cstore.Get(name); c != nil && err == nil { + return c.ID, nil + } + + return "", ErrLayerUnknown +} + +func (s *store) DeleteLayer(id string) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + ristore, err := s.ImageStore() + if err != nil { + return err + } + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + + if rlstore.Exists(id) { + if l, err := rlstore.Get(id); err != nil { + id = l.ID + } + layers, err := rlstore.Layers() + if err != nil { + return err + } + for _, layer := range layers { + if layer.Parent == id { + return errors.Wrapf(ErrLayerHasChildren, "used by layer %v", layer.ID) + } + } + images, err := ristore.Images() + if err != nil { + return err + } + + for _, image := range images { + if image.TopLayer == id { + return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID) + } + if stringutils.InSlice(image.MappedTopLayers, id) { + // No write access to the image store, fail before the layer is deleted + if _, ok := ristore.(*imageStore); !ok { + return errors.Wrapf(ErrLayerUsedByImage, "layer %v used by image %v", id, image.ID) + } + } + } + containers, err := rcstore.Containers() + if err != nil { + return err + } + for _, container := range containers { + if container.LayerID == id { + return errors.Wrapf(ErrLayerUsedByContainer, "layer %v used by container %v", id, container.ID) + } + } + if err := rlstore.Delete(id); err != nil { + return errors.Wrapf(err, "delete layer %v", id) + } + + // The check here is used to avoid iterating the images if we don't need to. 
+ // There is already a check above for the imageStore to be writeable when the layer is part of MappedTopLayers. + if istore, ok := ristore.(*imageStore); ok { + for _, image := range images { + if stringutils.InSlice(image.MappedTopLayers, id) { + if err = istore.removeMappedTopLayer(image.ID, id); err != nil { + return errors.Wrapf(err, "remove mapped top layer %v from image %v", id, image.ID) + } + } + } + } + return nil + } + return ErrNotALayer +} + +func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, err + } + ristore, err := s.ImageStore() + if err != nil { + return nil, err + } + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return nil, err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, err + } + layersToRemove := []string{} + if ristore.Exists(id) { + image, err := ristore.Get(id) + if err != nil { + return nil, err + } + id = image.ID + containers, err := rcstore.Containers() + if err != nil { + return nil, err + } + aContainerByImage := make(map[string]string) + for _, container := range containers { + aContainerByImage[container.ImageID] = container.ID + } + if container, ok := aContainerByImage[id]; ok { + return nil, errors.Wrapf(ErrImageUsedByContainer, "Image used by %v", container) + } + images, err := ristore.Images() + if err != nil { + return nil, err + } + layers, err := rlstore.Layers() + if err != nil { + return nil, err + } + childrenByParent := make(map[string][]string) + for _, layer := range layers { + childrenByParent[layer.Parent] = append(childrenByParent[layer.Parent], layer.ID) + } + otherImagesTopLayers := make(map[string]struct{}) + for _, img := range images { + if img.ID != id { + otherImagesTopLayers[img.TopLayer] = struct{}{} + for _, layerID := range img.MappedTopLayers { + otherImagesTopLayers[layerID] = struct{}{} + } + } + } + if commit { + if err = ristore.Delete(id); err != nil { + return nil, err + } + } + layer := image.TopLayer + layersToRemoveMap := make(map[string]struct{}) + layersToRemove = append(layersToRemove, image.MappedTopLayers...) + for _, mappedTopLayer := range image.MappedTopLayers { + layersToRemoveMap[mappedTopLayer] = struct{}{} + } + for layer != "" { + if rcstore.Exists(layer) { + break + } + if _, used := otherImagesTopLayers[layer]; used { + break + } + parent := "" + if l, err := rlstore.Get(layer); err == nil { + parent = l.Parent + } + hasChildrenNotBeingRemoved := func() bool { + layersToCheck := []string{layer} + if layer == image.TopLayer { + layersToCheck = append(layersToCheck, image.MappedTopLayers...) 
+ } + for _, layer := range layersToCheck { + if childList := childrenByParent[layer]; len(childList) > 0 { + for _, child := range childList { + if _, childIsSlatedForRemoval := layersToRemoveMap[child]; childIsSlatedForRemoval { + continue + } + return true + } + } + } + return false + } + if hasChildrenNotBeingRemoved() { + break + } + layersToRemove = append(layersToRemove, layer) + layersToRemoveMap[layer] = struct{}{} + layer = parent + } + } else { + return nil, ErrNotAnImage + } + if commit { + for _, layer := range layersToRemove { + if err = rlstore.Delete(layer); err != nil { + return nil, err + } + } + } + return layersToRemove, nil +} + +func (s *store) DeleteContainer(id string) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + ristore, err := s.ImageStore() + if err != nil { + return err + } + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + + if rcstore.Exists(id) { + if container, err := rcstore.Get(id); err == nil { + errChan := make(chan error) + var wg sync.WaitGroup + + if rlstore.Exists(container.LayerID) { + wg.Add(1) + go func() { + errChan <- rlstore.Delete(container.LayerID) + wg.Done() + }() + } + wg.Add(1) + go func() { + errChan <- rcstore.Delete(id) + wg.Done() + }() + + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) + wg.Add(1) + go func() { + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(gcpath) + if err == nil { + errChan <- nil + return + } + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(gcpath) + }() + + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) + wg.Add(1) + go func() { + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(rcpath) + if err == nil { + errChan <- nil + return + } + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(rcpath) + }() + + go func() { + wg.Wait() + close(errChan) + }() + + var errors []error + for err := range errChan { + if err != nil { + errors = append(errors, err) + } + } + return multierror.Append(nil, errors...).ErrorOrNil() + } + } + return ErrNotAContainer +} + +func (s *store) Delete(id string) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + ristore, err := s.ImageStore() + if err != nil { + return err + } + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + + if rcstore.Exists(id) { + if container, err := rcstore.Get(id); err == nil { + if rlstore.Exists(container.LayerID) { + if err = rlstore.Delete(container.LayerID); err != nil { + return err + } + if err = rcstore.Delete(id); err != nil { + return err + } + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata") + if err = 
os.RemoveAll(gcpath); err != nil { + return err + } + rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(rcpath); err != nil { + return err + } + return nil + } + return ErrNotALayer + } + } + if ristore.Exists(id) { + return ristore.Delete(id) + } + if rlstore.Exists(id) { + return rlstore.Delete(id) + } + return ErrLayerUnknown +} + +func (s *store) Wipe() error { + rcstore, err := s.ContainerStore() + if err != nil { + return err + } + ristore, err := s.ImageStore() + if err != nil { + return err + } + rlstore, err := s.LayerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return err + } + ristore.Lock() + defer ristore.Unlock() + if err := ristore.ReloadIfChanged(); err != nil { + return err + } + rcstore.Lock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return err + } + + if err = rcstore.Wipe(); err != nil { + return err + } + if err = ristore.Wipe(); err != nil { + return err + } + return rlstore.Wipe() +} + +func (s *store) Status() ([][2]string, error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, err + } + return rlstore.Status() +} + +func (s *store) Version() ([][2]string, error) { + return [][2]string{}, nil +} + +func (s *store) mount(id string, options drivers.MountOpts) (string, error) { + rlstore, err := s.LayerStore() + if err != nil { + return "", err + } + + s.graphLock.Lock() + defer s.graphLock.Unlock() + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return "", err + } + + modified, err := s.graphLock.Modified() + if err != nil { + return "", err + } + + /* We need to make sure the home mount is present when the Mount is done. */ + if modified { + s.graphDriver = nil + s.layerStore = nil + s.graphDriver, err = s.getGraphDriver() + if err != nil { + return "", err + } + s.lastLoaded = time.Now() + } + + if options.UidMaps != nil || options.GidMaps != nil { + options.DisableShifting = !s.canUseShifting(options.UidMaps, options.GidMaps) + } + + if rlstore.Exists(id) { + return rlstore.Mount(id, options) + } + return "", ErrLayerUnknown +} + +func (s *store) MountImage(id string, mountOpts []string, mountLabel string) (string, error) { + // Append ReadOnly option to mountOptions + img, err := s.Image(id) + if err != nil { + return "", err + } + + if err := validateMountOptions(mountOpts); err != nil { + return "", err + } + options := drivers.MountOpts{ + MountLabel: mountLabel, + Options: append(mountOpts, "ro"), + } + + return s.mount(img.TopLayer, options) +} + +func (s *store) Mount(id, mountLabel string) (string, error) { + options := drivers.MountOpts{ + MountLabel: mountLabel, + } + // If `id` is a container, grab its LayerID, uidmap, gidmap, and mount options; + // otherwise we assume the id is a LayerID and attempt to mount it.
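+ // A minimal hypothetical call, assuming containerID names an existing container:
+ //	mountpoint, err := store.Mount(containerID, "") // path where the layer is mounted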
+ if container, err := s.Container(id); err == nil { + id = container.LayerID + options.UidMaps = container.UIDMap + options.GidMaps = container.GIDMap + options.Options = container.MountOpts() + if !s.disableVolatile { + if v, found := container.Flags["Volatile"]; found { + options.Volatile = v.(bool) + } + } + } + return s.mount(id, options) +} + +func (s *store) Mounted(id string) (int, error) { + if layerID, err := s.ContainerLayerID(id); err == nil { + id = layerID + } + rlstore, err := s.LayerStore() + if err != nil { + return 0, err + } + rlstore.RLock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return 0, err + } + + return rlstore.Mounted(id) +} + +func (s *store) UnmountImage(id string, force bool) (bool, error) { + img, err := s.Image(id) + if err != nil { + return false, err + } + return s.Unmount(img.TopLayer, force) +} + +func (s *store) Unmount(id string, force bool) (bool, error) { + if layerID, err := s.ContainerLayerID(id); err == nil { + id = layerID + } + rlstore, err := s.LayerStore() + if err != nil { + return false, err + } + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return false, err + } + if rlstore.Exists(id) { + return rlstore.Unmount(id, force) + } + return false, ErrLayerUnknown +} + +func (s *store) Changes(from, to string) ([]archive.Change, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + if store.Exists(to) { + return store.Changes(from, to) + } + } + return nil, ErrLayerUnknown +} + +func (s *store) DiffSize(from, to string) (int64, error) { + lstore, err := s.LayerStore() + if err != nil { + return -1, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return -1, err + } + if store.Exists(to) { + return store.DiffSize(from, to) + } + } + return -1, ErrLayerUnknown +} + +func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + + // NaiveDiff could cause mounts to happen without a lock, so be safe + // and treat the .Diff operation as a Mount. + s.graphLock.Lock() + defer s.graphLock.Unlock() + + modified, err := s.graphLock.Modified() + if err != nil { + return nil, err + } + + // We need to make sure the home mount is present when the Mount is done. + if modified { + s.graphDriver = nil + s.layerStore = nil + s.graphDriver, err = s.getGraphDriver() + if err != nil { + return nil, err + } + s.lastLoaded = time.Now() + } + + for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ + store := s + store.RLock() + if err := store.ReloadIfChanged(); err != nil { + store.Unlock() + return nil, err + } + if store.Exists(to) { + rc, err := store.Diff(from, to, options) + if rc != nil && err == nil { + wrapped := ioutils.NewReadCloserWrapper(rc, func() error { + err := rc.Close() + store.Unlock() + return err + }) + return wrapped, nil + } + store.Unlock() + return rc, err + } + store.Unlock() + } + return nil, ErrLayerUnknown +} + +func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return err + } + } + if !rlstore.Exists(to) { + return ErrLayerUnknown + } + return rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) +} + +func (s *store) CleanupStagingDirectory(stagingDirectory string) error { + rlstore, err := s.LayerStore() + if err != nil { + return err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return err + } + } + return rlstore.CleanupStagingDirectory(stagingDirectory) +} + +func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return nil, err + } + } + if to != "" && !rlstore.Exists(to) { + return nil, ErrLayerUnknown + } + return rlstore.ApplyDiffWithDiffer(to, options, differ) +} + +func (s *store) DifferTarget(id string) (string, error) { + rlstore, err := s.LayerStore() + if err != nil { + return "", err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + if err = rlstore.Load(); err != nil { + return "", err + } + } + if rlstore.Exists(id) { + return rlstore.DifferTarget(id) + } + return "", ErrLayerUnknown +} + +func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) { + rlstore, err := s.LayerStore() + if err != nil { + return -1, err + } + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return -1, err + } + if rlstore.Exists(to) { + return rlstore.ApplyDiff(to, diff) + } + return -1, ErrLayerUnknown +} + +func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { + var layers []Layer + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + storeLayers, err := m(store, d) + if err != nil { + if errors.Cause(err) != ErrLayerUnknown { + return nil, err + } + continue + } + layers = append(layers, storeLayers...) 
+ } + if len(layers) == 0 { + return nil, ErrLayerUnknown + } + return layers, nil +} + +func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d) + } + return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) +} + +func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d) + } + return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) +} + +func (s *store) LayerSize(id string) (int64, error) { + lstore, err := s.LayerStore() + if err != nil { + return -1, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return -1, err + } + if store.Exists(id) { + return store.Size(id) + } + } + return -1, ErrLayerUnknown +} + +func (s *store) LayerParentOwners(id string) ([]int, []int, error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, nil, err + } + rlstore.RLock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, nil, err + } + if rlstore.Exists(id) { + return rlstore.ParentOwners(id) + } + return nil, nil, ErrLayerUnknown +} + +func (s *store) ContainerParentOwners(id string) ([]int, []int, error) { + rlstore, err := s.LayerStore() + if err != nil { + return nil, nil, err + } + rcstore, err := s.ContainerStore() + if err != nil { + return nil, nil, err + } + rlstore.RLock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, nil, err + } + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, nil, err + } + container, err := rcstore.Get(id) + if err != nil { + return nil, nil, err + } + if rlstore.Exists(container.LayerID) { + return rlstore.ParentOwners(container.LayerID) + } + return nil, nil, ErrLayerUnknown +} + +func (s *store) Layers() ([]Layer, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + + layers, err := func() ([]Layer, error) { + lstore.Lock() + defer lstore.Unlock() + if err := lstore.Load(); err != nil { + return nil, err + } + return lstore.Layers() + }() + if err != nil { + return nil, err + } + + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + + for _, s := range lstores { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + storeLayers, err := store.Layers() + if err != nil { + return nil, err + } + layers = append(layers, storeLayers...) + } + return layers, nil +} + +func (s *store) Images() ([]Image, error) { + var images []Image + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...) 
{ + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + storeImages, err := store.Images() + if err != nil { + return nil, err + } + images = append(images, storeImages...) + } + return images, nil +} + +func (s *store) Containers() ([]Container, error) { + rcstore, err := s.ContainerStore() + if err != nil { + return nil, err + } + + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return nil, err + } + + return rcstore.Containers() +} + +func (s *store) Layer(id string) (*Layer, error) { + lstore, err := s.LayerStore() + if err != nil { + return nil, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { + store := s + store.RLock() + defer store.Unlock() + if err := store.ReloadIfChanged(); err != nil { + return nil, err + } + layer, err := store.Get(id) + if err == nil { + return layer, nil + } + } + return nil, ErrLayerUnknown +} + +func (s *store) LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error) { + adriver, ok := s.graphDriver.(drivers.AdditionalLayerStoreDriver) + if !ok { + return nil, ErrLayerUnknown + } + + al, err := adriver.LookupAdditionalLayer(d, imageref) + if err != nil { + if errors.Is(err, drivers.ErrLayerUnknown) { + return nil, ErrLayerUnknown + } + return nil, err + } + info, err := al.Info() + if err != nil { + return nil, err + } + defer info.Close() + var layer Layer + if err := json.NewDecoder(info).Decode(&layer); err != nil { + return nil, err + } + return &additionalLayer{&layer, al, s}, nil +} + +type additionalLayer struct { + layer *Layer + handler drivers.AdditionalLayer + s *store +} + +func (al *additionalLayer) UncompressedDigest() digest.Digest { + return al.layer.UncompressedDigest +} + +func (al *additionalLayer) CompressedSize() int64 { + return al.layer.CompressedSize +} + +func (al *additionalLayer) PutAs(id, parent string, names []string) (*Layer, error) { + rlstore, err := al.s.LayerStore() + if err != nil { + return nil, err + } + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, err + } + rlstores, err := al.s.ROLayerStores() + if err != nil { + return nil, err + } + + var parentLayer *Layer + if parent != "" { + for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) { + if lstore != rlstore { + lstore.RLock() + defer lstore.Unlock() + if err := lstore.ReloadIfChanged(); err != nil { + return nil, err + } + } + parentLayer, err = lstore.Get(parent) + if err == nil { + break + } + } + if parentLayer == nil { + return nil, ErrLayerUnknown + } + } + + return rlstore.PutAdditionalLayer(id, parentLayer, names, al.handler) +} + +func (al *additionalLayer) Release() { + al.handler.Release() +} + +func (s *store) Image(id string) (*Image, error) { + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, s := range append([]ROImageStore{istore}, istores...) 
{
+ store := s
+ store.RLock()
+ defer store.Unlock()
+ if err := store.ReloadIfChanged(); err != nil {
+ return nil, err
+ }
+ image, err := store.Get(id)
+ if err == nil {
+ return image, nil
+ }
+ }
+ return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+}
+
+func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
+ images := []*Image{}
+ layer, err := s.Layer(id)
+ if err != nil {
+ return nil, err
+ }
+
+ istore, err := s.ImageStore()
+ if err != nil {
+ return nil, err
+ }
+
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, s := range append([]ROImageStore{istore}, istores...) {
+ store := s
+ store.RLock()
+ defer store.Unlock()
+ if err := store.ReloadIfChanged(); err != nil {
+ return nil, err
+ }
+ imageList, err := store.Images()
+ if err != nil {
+ return nil, err
+ }
+ for _, image := range imageList {
+ if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) {
+ image := image // copy the loop variable so each appended pointer refers to a distinct Image
+ images = append(images, &image)
+ }
+ }
+ }
+ return images, nil
+}
+
+func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
+ images := []*Image{}
+
+ istore, err := s.ImageStore()
+ if err != nil {
+ return nil, err
+ }
+
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.RLock()
+ defer store.Unlock()
+ if err := store.ReloadIfChanged(); err != nil {
+ return nil, err
+ }
+ imageList, err := store.ByDigest(d)
+ if err != nil && errors.Cause(err) != ErrImageUnknown {
+ return nil, err
+ }
+ images = append(images, imageList...)
+ }
+ return images, nil
+}
+
+func (s *store) Container(id string) (*Container, error) {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return nil, err
+ }
+ rcstore.RLock()
+ defer rcstore.Unlock()
+ if err := rcstore.ReloadIfChanged(); err != nil {
+ return nil, err
+ }
+
+ return rcstore.Get(id)
+}
+
+func (s *store) ContainerLayerID(id string) (string, error) {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return "", err
+ }
+ rcstore.RLock()
+ defer rcstore.Unlock()
+ if err := rcstore.ReloadIfChanged(); err != nil {
+ return "", err
+ }
+ container, err := rcstore.Get(id)
+ if err != nil {
+ return "", err
+ }
+ return container.LayerID, nil
+}
+
+func (s *store) ContainerByLayer(id string) (*Container, error) {
+ layer, err := s.Layer(id)
+ if err != nil {
+ return nil, err
+ }
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return nil, err
+ }
+ rcstore.RLock()
+ defer rcstore.Unlock()
+ if err := rcstore.ReloadIfChanged(); err != nil {
+ return nil, err
+ }
+ containerList, err := rcstore.Containers()
+ if err != nil {
+ return nil, err
+ }
+ for _, container := range containerList {
+ if container.LayerID == layer.ID {
+ return &container, nil
+ }
+ }
+
+ return nil, ErrContainerUnknown
+}
+
+func (s *store) ContainerDirectory(id string) (string, error) {
+ rcstore, err := s.ContainerStore()
+ if err != nil {
+ return "", err
+ }
+ rcstore.RLock()
+ defer rcstore.Unlock()
+ if err := rcstore.ReloadIfChanged(); err != nil {
+ return "", err
+ }
+
+ id, err = rcstore.Lookup(id)
+ if err != nil {
+ return "", err
+ }
+
+ middleDir := s.graphDriverName + "-containers"
+ gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata")
+ if err := os.MkdirAll(gcpath, 0700); err != nil {
+ return "", err
+ }
+ return gcpath, nil
+}
+
+func (s *store) ContainerRunDirectory(id string) (string, error) {
+ rcstore, err := s.ContainerStore()
+ if err
!= nil { + return "", err + } + + rcstore.RLock() + defer rcstore.Unlock() + if err := rcstore.ReloadIfChanged(); err != nil { + return "", err + } + + id, err = rcstore.Lookup(id) + if err != nil { + return "", err + } + + middleDir := s.graphDriverName + "-containers" + rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(rcpath, 0700); err != nil { + return "", err + } + return rcpath, nil +} + +func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error { + dir, err := s.ContainerDirectory(id) + if err != nil { + return err + } + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) +} + +func (s *store) FromContainerDirectory(id, file string) ([]byte, error) { + dir, err := s.ContainerDirectory(id) + if err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(dir, file)) +} + +func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error { + dir, err := s.ContainerRunDirectory(id) + if err != nil { + return err + } + err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) +} + +func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) { + dir, err := s.ContainerRunDirectory(id) + if err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(dir, file)) +} + +func (s *store) Shutdown(force bool) ([]string, error) { + mounted := []string{} + modified := false + + rlstore, err := s.LayerStore() + if err != nil { + return mounted, err + } + + s.graphLock.Lock() + defer s.graphLock.Unlock() + + rlstore.Lock() + defer rlstore.Unlock() + if err := rlstore.ReloadIfChanged(); err != nil { + return nil, err + } + + layers, err := rlstore.Layers() + if err != nil { + return mounted, err + } + for _, layer := range layers { + if layer.MountCount == 0 { + continue + } + mounted = append(mounted, layer.ID) + if force { + for layer.MountCount > 0 { + _, err2 := rlstore.Unmount(layer.ID, force) + if err2 != nil { + if err == nil { + err = err2 + } + break + } + modified = true + } + } + } + if len(mounted) > 0 && err == nil { + err = errors.Wrap(ErrLayerUsedByContainer, "A layer is mounted") + } + if err == nil { + err = s.graphDriver.Cleanup() + s.graphLock.Touch() + modified = true + } + if modified { + rlstore.Touch() + } + return mounted, err +} + +// Convert a BigData key name into an acceptable file name. +func makeBigDataBaseName(key string) string { + reader := strings.NewReader(key) + for reader.Len() > 0 { + ch, size, err := reader.ReadRune() + if err != nil || size != 1 { + break + } + if ch != '.' 
&& !(ch >= '0' && ch <= '9') && !(ch >= 'a' && ch <= 'z') {
+ break
+ }
+ }
+ if reader.Len() > 0 {
+ return "=" + base64.StdEncoding.EncodeToString([]byte(key))
+ }
+ return key
+}
+
+func stringSliceWithoutValue(slice []string, value string) []string {
+ modified := make([]string, 0, len(slice))
+ for _, v := range slice {
+ if v == value {
+ continue
+ }
+ modified = append(modified, v)
+ }
+ return modified
+}
+
+func copyStringSlice(slice []string) []string {
+ if len(slice) == 0 {
+ return nil
+ }
+ ret := make([]string, len(slice))
+ copy(ret, slice)
+ return ret
+}
+
+func copyStringInt64Map(m map[string]int64) map[string]int64 {
+ ret := make(map[string]int64, len(m))
+ for k, v := range m {
+ ret[k] = v
+ }
+ return ret
+}
+
+func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
+ ret := make(map[string]digest.Digest, len(m))
+ for k, v := range m {
+ ret[k] = v
+ }
+ return ret
+}
+
+func copyDigestSlice(slice []digest.Digest) []digest.Digest {
+ if len(slice) == 0 {
+ return nil
+ }
+ ret := make([]digest.Digest, len(slice))
+ copy(ret, slice)
+ return ret
+}
+
+// copyStringInterfaceMap still forces us to assume that the interface{} is
+// a non-pointer scalar value
+func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
+ ret := make(map[string]interface{}, len(m))
+ for k, v := range m {
+ ret[k] = v
+ }
+ return ret
+}
+
+// AutoUserNsMinSize is the minimum size for automatically created user namespaces
+const AutoUserNsMinSize = 1024
+
+// AutoUserNsMaxSize is the maximum size for automatically created user namespaces
+const AutoUserNsMaxSize = 65536
+
+// RootAutoUserNsUser is the default user used for root containers when automatically
+// creating a user namespace.
+const RootAutoUserNsUser = "containers"
+
+// SetDefaultConfigFilePath sets the default configuration to the specified path
+func SetDefaultConfigFilePath(path string) {
+ types.SetDefaultConfigFilePath(path)
+}
+
+// DefaultConfigFile returns the path to the storage config file used
+func DefaultConfigFile(rootless bool) (string, error) {
+ return types.DefaultConfigFile(rootless)
+}
+
+// ReloadConfigurationFile parses the specified configuration file and overrides
+// the configuration in storeOptions.
+func ReloadConfigurationFile(configFile string, storeOptions *types.StoreOptions) {
+ types.ReloadConfigurationFile(configFile, storeOptions)
+}
+
+// GetDefaultMountOptions returns the default mount options defined in containers/storage
+func GetDefaultMountOptions() ([]string, error) {
+ defaultStoreOptions := types.Options()
+ return GetMountOptions(defaultStoreOptions.GraphDriverName, defaultStoreOptions.GraphDriverOptions)
+}
+
+// GetMountOptions returns the mount options for the specified driver and graphDriverOptions
+func GetMountOptions(driver string, graphDriverOptions []string) ([]string, error) {
+ mountOpts := []string{
+ ".mountopt",
+ fmt.Sprintf("%s.mountopt", driver),
+ }
+ for _, option := range graphDriverOptions {
+ key, val, err := parsers.ParseKeyValueOpt(option)
+ if err != nil {
+ return nil, err
+ }
+ key = strings.ToLower(key)
+ for _, m := range mountOpts {
+ if m == key {
+ return strings.Split(val, ","), nil
+ }
+ }
+ }
+ return nil, nil
+}
+
+// Free removes the store from the list of stores
+func (s *store) Free() {
+ for i := 0; i < len(stores); i++ {
+ if stores[i] == s {
+ stores = append(stores[:i], stores[i+1:]...)
+ return + } + } +} diff --git a/vendor/github.com/containers/storage/types/default_override_test.conf b/vendor/github.com/containers/storage/types/default_override_test.conf new file mode 100644 index 00000000000..caa537ba98b --- /dev/null +++ b/vendor/github.com/containers/storage/types/default_override_test.conf @@ -0,0 +1,11 @@ +[storage] + +# Default Storage Driver +driver = "" + +# Primary Read/Write location of container storage +graphroot = "environment_override_graphroot" + +# Storage path for rootless users +# +rootless_storage_path = "environment_override_rootless_storage_path" diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go new file mode 100644 index 00000000000..ad12ffdbf2d --- /dev/null +++ b/vendor/github.com/containers/storage/types/errors.go @@ -0,0 +1,60 @@ +package types + +import ( + "errors" +) + +var ( + // ErrContainerUnknown indicates that there was no container with the specified name or ID. + ErrContainerUnknown = errors.New("container not known") + // ErrDigestUnknown indicates that we were unable to compute the digest of a specified item. + ErrDigestUnknown = errors.New("could not compute digest of item") + // ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used. + ErrDuplicateID = errors.New("that ID is already in use") + // ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images. + ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images") + // ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers. + ErrDuplicateLayerNames = errors.New("read-only layer store assigns the same name to multiple layers") + // ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used. + ErrDuplicateName = errors.New("that name is already in use") + // ErrImageUnknown indicates that there was no image with the specified name or ID. + ErrImageUnknown = errors.New("image not known") + // ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image. + ErrImageUsedByContainer = errors.New("image is in use by a container") + // ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information. + ErrIncompleteOptions = errors.New("missing necessary StoreOptions") + // ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty. + ErrInvalidBigDataName = errors.New("not a valid name for a big data item") + // ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children. + ErrLayerHasChildren = errors.New("layer has children") + // ErrLayerNotMounted is returned when the requested information can only be computed for a mounted layer, and the layer is not mounted. + ErrLayerNotMounted = errors.New("layer is not mounted") + // ErrLayerUnknown indicates that there was no layer with the specified name or ID. + ErrLayerUnknown = errors.New("layer not known") + // ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer. + ErrLayerUsedByContainer = errors.New("layer is in use by a container") + // ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer. 
+ ErrLayerUsedByImage = errors.New("layer is in use by an image")
+ // ErrLoadError indicates that there was an initialization error.
+ ErrLoadError = errors.New("error loading storage metadata")
+ // ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container.
+ ErrNotAContainer = errors.New("identifier is not a container")
+ // ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer.
+ ErrNotALayer = errors.New("identifier is not a layer")
+ // ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist.
+ ErrNotAnID = errors.New("identifier is not a layer, image, or container")
+ // ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image.
+ ErrNotAnImage = errors.New("identifier is not an image")
+ // ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer.
+ ErrParentIsContainer = errors.New("would-be parent layer is a container")
+ // ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer.
+ ErrParentUnknown = errors.New("parent of layer not known")
+ // ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer.
+ ErrSizeUnknown = errors.New("size is not known")
+ // ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents.
+ ErrStoreIsReadOnly = errors.New("called a write method on a read-only store")
+ // ErrNotSupported is returned when the requested functionality is not supported.
+ ErrNotSupported = errors.New("not supported")
+ // ErrInvalidMappings is returned when the specified mappings are invalid.
+ ErrInvalidMappings = errors.New("invalid mappings specified")
+)
diff --git a/vendor/github.com/containers/storage/types/idmappings.go b/vendor/github.com/containers/storage/types/idmappings.go
new file mode 100644
index 00000000000..82824ae2b22
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/idmappings.go
@@ -0,0 +1,102 @@
+package types
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/pkg/errors"
+)
+
+// AutoUserNsOptions defines how to automatically create a user namespace.
+type AutoUserNsOptions struct {
+ // Size defines the size for the user namespace. If it is set to a
+ // value bigger than 0, the user namespace will have exactly this size.
+ // If it is not set, some heuristics will be used to find its size.
+ Size uint32
+ // InitialSize defines the minimum size for the user namespace.
+ // The created user namespace will have at least this size.
+ InitialSize uint32
+ // PasswdFile to use if the container uses a volume.
+ PasswdFile string
+ // GroupFile to use if the container uses a volume.
+ GroupFile string
+ // AdditionalUIDMappings specifies additional UID mappings to include in
+ // the generated user namespace.
+ AdditionalUIDMappings []idtools.IDMap
+ // AdditionalGIDMappings specifies additional GID mappings to include in
+ // the generated user namespace.
+ AdditionalGIDMappings []idtools.IDMap
+}
+
+// IDMappingOptions are used for specifying how ID mapping should be set up for
+// a layer or container.
+type IDMappingOptions struct {
+ // UIDMap and GIDMap are used for setting up a layer's root filesystem
+ // for use inside of a user namespace where ID mapping is being used.
+ // If HostUIDMapping/HostGIDMapping is true, no mapping of the
+ // respective type will be used. Otherwise, if UIDMap and/or GIDMap
+ // contain at least one mapping, one or both will be used. By default,
+ // if neither of those conditions apply, if the layer has a parent
+ // layer, the parent layer's mapping will be used, and if it does not
+ // have a parent layer, the mapping which was passed to the Store
+ // object when it was initialized will be used.
+ HostUIDMapping bool
+ HostGIDMapping bool
+ UIDMap []idtools.IDMap
+ GIDMap []idtools.IDMap
+ AutoUserNs bool
+ AutoUserNsOpts AutoUserNsOptions
+}
+
+// ParseIDMapping takes ID mappings and subuid and subgid maps and returns a storage mapping
+func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*IDMappingOptions, error) {
+ options := IDMappingOptions{
+ HostUIDMapping: true,
+ HostGIDMapping: true,
+ }
+ if subGIDMap == "" && subUIDMap != "" {
+ subGIDMap = subUIDMap
+ }
+ if subUIDMap == "" && subGIDMap != "" {
+ subUIDMap = subGIDMap
+ }
+ if len(GIDMapSlice) == 0 && len(UIDMapSlice) != 0 {
+ GIDMapSlice = UIDMapSlice
+ }
+ if len(UIDMapSlice) == 0 && len(GIDMapSlice) != 0 {
+ UIDMapSlice = GIDMapSlice
+ }
+ if len(UIDMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 {
+ UIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())}
+ }
+ if len(GIDMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 {
+ GIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())}
+ }
+
+ if subUIDMap != "" && subGIDMap != "" {
+ mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s gidmap=%s", subUIDMap, subGIDMap)
+ }
+ options.UIDMap = mappings.UIDs()
+ options.GIDMap = mappings.GIDs()
+ }
+ parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID")
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to parse UID map UID=%s", UIDMapSlice)
+ }
+ parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID")
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to parse GID map GID=%s", GIDMapSlice)
+ }
+ options.UIDMap = append(options.UIDMap, parsedUIDMap...)
+ options.GIDMap = append(options.GIDMap, parsedGIDMap...)
+ if len(options.UIDMap) > 0 {
+ options.HostUIDMapping = false
+ }
+ if len(options.GIDMap) > 0 {
+ options.HostGIDMapping = false
+ }
+ return &options, nil
+}
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
new file mode 100644
index 00000000000..a71c6d2efd8
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -0,0 +1,446 @@
+package types
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/BurntSushi/toml"
+ cfg "github.com/containers/storage/pkg/config"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/sirupsen/logrus"
+)
+
+// TOML-friendly explicit tables used for conversions.
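+// The top-level [storage] table decodes into the Storage field; keys under
+// [storage.options] decode into the embedded cfg.OptionsConfig.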
+type TomlConfig struct {
+ Storage struct {
+ Driver string `toml:"driver,omitempty"`
+ RunRoot string `toml:"runroot,omitempty"`
+ GraphRoot string `toml:"graphroot,omitempty"`
+ RootlessStoragePath string `toml:"rootless_storage_path,omitempty"`
+ Options cfg.OptionsConfig `toml:"options,omitempty"`
+ } `toml:"storage"`
+}
+
+const (
+ // these are the default paths for the run root and graph root for rootful users;
+ // for rootless users the paths are constructed via getRootlessStorageOpts
+ defaultRunRoot string = "/run/containers/storage"
+ defaultGraphRoot string = "/var/lib/containers/storage"
+)
+
+// defaultConfigFile is the path to the system-wide storage.conf file
+var (
+ defaultConfigFile = "/usr/share/containers/storage.conf"
+ defaultOverrideConfigFile = "/etc/containers/storage.conf"
+ defaultConfigFileSet = false
+ // defaultStoreOptions is a reasonable default set of options.
+ defaultStoreOptions StoreOptions
+)
+
+const (
+ overlayDriver = "overlay"
+ overlay2 = "overlay2"
+)
+
+func init() {
+ defaultStoreOptions.RunRoot = defaultRunRoot
+ defaultStoreOptions.GraphRoot = defaultGraphRoot
+ defaultStoreOptions.GraphDriverName = ""
+
+ if _, err := os.Stat(defaultOverrideConfigFile); err == nil {
+ // DefaultConfigFile(rootless) reports the storage.conf file in use
+ // by returning defaultConfigFile; if the override file exists,
+ // containers/storage uses it by default.
+ defaultConfigFile = defaultOverrideConfigFile
+ ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions)
+ } else {
+ if !os.IsNotExist(err) {
+ logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err)
+ }
+ ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
+ }
+ // the reload can leave the run and graph roots empty if the config does not contain them
+ if defaultStoreOptions.RunRoot == "" {
+ defaultStoreOptions.RunRoot = defaultRunRoot
+ }
+ if defaultStoreOptions.GraphRoot == "" {
+ defaultStoreOptions.GraphRoot = defaultGraphRoot
+ }
+}
+
+// defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing.
+// Everyone except the tests this is intended for should call DefaultStoreOptions, never this function.
+func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf string) (StoreOptions, error) { + var ( + defaultRootlessRunRoot string + defaultRootlessGraphRoot string + err error + ) + storageOpts := defaultStoreOptions + if rootless && rootlessUID != 0 { + storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts) + if err != nil { + return storageOpts, err + } + } + _, err = os.Stat(storageConf) + if err != nil && !os.IsNotExist(err) { + return storageOpts, err + } + if err == nil && !defaultConfigFileSet { + defaultRootlessRunRoot = storageOpts.RunRoot + defaultRootlessGraphRoot = storageOpts.GraphRoot + storageOpts = StoreOptions{} + reloadConfigurationFileIfNeeded(storageConf, &storageOpts) + if rootless && rootlessUID != 0 { + // If the file did not specify a graphroot or runroot, + // set sane defaults so we don't try and use root-owned + // directories + if storageOpts.RunRoot == "" { + storageOpts.RunRoot = defaultRootlessRunRoot + } + if storageOpts.GraphRoot == "" { + if storageOpts.RootlessStoragePath != "" { + storageOpts.GraphRoot = storageOpts.RootlessStoragePath + } else { + storageOpts.GraphRoot = defaultRootlessGraphRoot + } + } + } + } + if storageOpts.RunRoot != "" { + runRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID) + if err != nil { + return storageOpts, err + } + storageOpts.RunRoot = runRoot + } + if storageOpts.GraphRoot != "" { + graphRoot, err := expandEnvPath(storageOpts.GraphRoot, rootlessUID) + if err != nil { + return storageOpts, err + } + storageOpts.GraphRoot = graphRoot + } + if storageOpts.RootlessStoragePath != "" { + storagePath, err := expandEnvPath(storageOpts.RootlessStoragePath, rootlessUID) + if err != nil { + return storageOpts, err + } + storageOpts.RootlessStoragePath = storagePath + } + + return storageOpts, nil +} + +// DefaultStoreOptions returns the default storage ops for containers +func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { + storageConf, err := DefaultConfigFile(rootless && rootlessUID != 0) + if err != nil { + return defaultStoreOptions, err + } + return defaultStoreOptionsIsolated(rootless, rootlessUID, storageConf) +} + +// StoreOptions is used for passing initialization options to GetStore(), for +// initializing a Store object and the underlying storage that it controls. +type StoreOptions struct { + // RunRoot is the filesystem path under which we can store run-time + // information, such as the locations of active mount points, that we + // want to lose if the host is rebooted. + RunRoot string `json:"runroot,omitempty"` + // GraphRoot is the filesystem path under which we will store the + // contents of layers, images, and containers. + GraphRoot string `json:"root,omitempty"` + // RootlessStoragePath is the storage path for rootless users + // default $HOME/.local/share/containers/storage + RootlessStoragePath string `toml:"rootless_storage_path"` + // GraphDriverName is the underlying storage driver that we'll be + // using. It only needs to be specified the first time a Store is + // initialized for a given RunRoot and GraphRoot. + GraphDriverName string `json:"driver,omitempty"` + // GraphDriverOptions are driver-specific options. + GraphDriverOptions []string `json:"driver-options,omitempty"` + // UIDMap and GIDMap are used for setting up a container's root filesystem + // for use inside of a user namespace where UID mapping is being used. 
+ UIDMap []idtools.IDMap `json:"uidmap,omitempty"` + GIDMap []idtools.IDMap `json:"gidmap,omitempty"` + // RootAutoNsUser is the user used to pick a subrange when automatically setting + // a user namespace for the root user. + RootAutoNsUser string `json:"root_auto_ns_user,omitempty"` + // AutoNsMinSize is the minimum size for an automatic user namespace. + AutoNsMinSize uint32 `json:"auto_userns_min_size,omitempty"` + // AutoNsMaxSize is the maximum size for an automatic user namespace. + AutoNsMaxSize uint32 `json:"auto_userns_max_size,omitempty"` + // PullOptions specifies options to be handed to pull managers + // This API is experimental and can be changed without bumping the major version number. + PullOptions map[string]string `toml:"pull_options"` + // DisableVolatile doesn't allow volatile mounts when it is set. + DisableVolatile bool `json:"disable-volatile,omitempty"` +} + +// isRootlessDriver returns true if the given storage driver is valid for containers running as non root +func isRootlessDriver(driver string) bool { + validDrivers := map[string]bool{ + "btrfs": true, + "overlay": true, + "overlay2": true, + "vfs": true, + } + return validDrivers[driver] +} + +// getRootlessStorageOpts returns the storage opts for containers running as non root +func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) { + var opts StoreOptions + + dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID) + if err != nil { + return opts, err + } + opts.RunRoot = rootlessRuntime + if systemOpts.RootlessStoragePath != "" { + opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID) + if err != nil { + return opts, err + } + } else { + opts.GraphRoot = filepath.Join(dataDir, "containers", "storage") + } + + if driver := systemOpts.GraphDriverName; isRootlessDriver(driver) { + opts.GraphDriverName = driver + } + if driver := os.Getenv("STORAGE_DRIVER"); driver != "" { + opts.GraphDriverName = driver + } + if opts.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") + opts.GraphDriverName = overlayDriver + } + + if opts.GraphDriverName == overlayDriver { + for _, o := range systemOpts.GraphDriverOptions { + if strings.Contains(o, "ignore_chown_errors") { + opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) + break + } + } + } + if opts.GraphDriverName == "" { + opts.GraphDriverName = "vfs" + } + + if os.Getenv("STORAGE_OPTS") != "" { + opts.GraphDriverOptions = append(opts.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...) 
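+ // each comma-separated entry is passed through verbatim as a graph-driver
+ // option, e.g. STORAGE_OPTS="overlay.mountopt=nodev" (illustrative value)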
+ }
+
+ return opts, nil
+}
+
+// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers
+func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) {
+ uid := getRootlessUID()
+ return DefaultStoreOptions(uid != 0, uid)
+}
+
+var prevReloadConfig = struct {
+ storeOptions *StoreOptions
+ mod time.Time
+ mutex sync.Mutex
+ configFile string
+}{}
+
+// SetDefaultConfigFilePath sets the default configuration to the specified path
+func SetDefaultConfigFilePath(path string) {
+ defaultConfigFile = path
+ defaultConfigFileSet = true
+ ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
+}
+
+func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) {
+ prevReloadConfig.mutex.Lock()
+ defer prevReloadConfig.mutex.Unlock()
+
+ fi, err := os.Stat(configFile)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
+ }
+ return
+ }
+
+ mtime := fi.ModTime()
+ if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile {
+ *storeOptions = *prevReloadConfig.storeOptions
+ return
+ }
+
+ ReloadConfigurationFile(configFile, storeOptions)
+
+ prevReloadConfig.storeOptions = storeOptions
+ prevReloadConfig.mod = mtime
+ prevReloadConfig.configFile = configFile
+}
+
+// ReloadConfigurationFile parses the specified configuration file and overrides
+// the configuration in storeOptions.
+func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
+ config := new(TomlConfig)
+
+ meta, err := toml.DecodeFile(configFile, &config)
+ if err == nil {
+ keys := meta.Undecoded()
+ if len(keys) > 0 {
+ logrus.Warningf("Failed to decode the keys %q from %q.", keys, configFile)
+ }
+ } else {
+ if !os.IsNotExist(err) {
+ fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
+ return
+ }
+ }
+
+ // Clear storeOptions of previous settings
+ *storeOptions = StoreOptions{}
+ if config.Storage.Driver != "" {
+ storeOptions.GraphDriverName = config.Storage.Driver
+ }
+ if os.Getenv("STORAGE_DRIVER") != "" {
+ config.Storage.Driver = os.Getenv("STORAGE_DRIVER")
+ storeOptions.GraphDriverName = config.Storage.Driver
+ }
+ if storeOptions.GraphDriverName == overlay2 {
+ logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.")
+ storeOptions.GraphDriverName = overlayDriver
+ }
+ if storeOptions.GraphDriverName == "" {
+ logrus.Errorf("The storage 'driver' option must be set in %s to guarantee proper operation.", configFile)
+ }
+ if config.Storage.RunRoot != "" {
+ storeOptions.RunRoot = config.Storage.RunRoot
+ }
+ if config.Storage.GraphRoot != "" {
+ storeOptions.GraphRoot = config.Storage.GraphRoot
+ }
+ if config.Storage.RootlessStoragePath != "" {
+ storeOptions.RootlessStoragePath = config.Storage.RootlessStoragePath
+ }
+ for _, s := range config.Storage.Options.AdditionalImageStores {
+ storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s))
+ }
+ for _, s := range config.Storage.Options.AdditionalLayerStores {
+ storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.additionallayerstore=%s", config.Storage.Driver, s))
+ }
+ if config.Storage.Options.Size != "" {
+ storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size))
+ }
+ if config.Storage.Options.MountProgram != "" {
+
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mount_program=%s", config.Storage.Driver, config.Storage.Options.MountProgram)) + } + if config.Storage.Options.SkipMountHome != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.skip_mount_home=%s", config.Storage.Driver, config.Storage.Options.SkipMountHome)) + } + if config.Storage.Options.IgnoreChownErrors != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ignore_chown_errors=%s", config.Storage.Driver, config.Storage.Options.IgnoreChownErrors)) + } + if config.Storage.Options.ForceMask != 0 { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.force_mask=%o", config.Storage.Driver, config.Storage.Options.ForceMask)) + } + if config.Storage.Options.MountOpt != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt)) + } + if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" { + config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser + } + if config.Storage.Options.RemapGroup != "" && config.Storage.Options.RemapUser == "" { + config.Storage.Options.RemapUser = config.Storage.Options.RemapGroup + } + if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" { + mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup) + if err != nil { + fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) + return + } + storeOptions.UIDMap = mappings.UIDs() + storeOptions.GIDMap = mappings.GIDs() + } + + uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids") + if err != nil { + fmt.Print(err) + } else { + storeOptions.UIDMap = uidmap + } + gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids") + if err != nil { + fmt.Print(err) + } else { + storeOptions.GIDMap = gidmap + } + storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser + if config.Storage.Options.AutoUsernsMinSize > 0 { + storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize + } + if config.Storage.Options.AutoUsernsMaxSize > 0 { + storeOptions.AutoNsMaxSize = config.Storage.Options.AutoUsernsMaxSize + } + if config.Storage.Options.PullOptions != nil { + storeOptions.PullOptions = config.Storage.Options.PullOptions + } + + storeOptions.DisableVolatile = config.Storage.Options.DisableVolatile + + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...) 
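+ // Note: unlike getRootlessStorageOpts above, a STORAGE_OPTS value set here
+ // replaces the driver options computed so far rather than appending to them.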
+
+ if opts, ok := os.LookupEnv("STORAGE_OPTS"); ok {
+ storeOptions.GraphDriverOptions = strings.Split(opts, ",")
+ }
+ if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" {
+ storeOptions.GraphDriverOptions = nil
+ }
+}
+
+func Options() StoreOptions {
+ return defaultStoreOptions
+}
+
+// Save overwrites the tomlConfig in storage.conf with the given conf
+func Save(conf TomlConfig, rootless bool) error {
+ configFile, err := DefaultConfigFile(rootless)
+ if err != nil {
+ return err
+ }
+
+ if err = os.Remove(configFile); !os.IsNotExist(err) && err != nil {
+ return err
+ }
+
+ f, err := os.Create(configFile)
+ if err != nil {
+ return err
+ }
+
+ return toml.NewEncoder(f).Encode(conf)
+}
+
+// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it
+func StorageConfig(rootless bool) (*TomlConfig, error) {
+ config := new(TomlConfig)
+
+ configFile, err := DefaultConfigFile(rootless)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = toml.DecodeFile(configFile, &config)
+ if err != nil {
+ return nil, err
+ }
+
+ return config, nil
+}
diff --git a/vendor/github.com/containers/storage/types/storage_broken.conf b/vendor/github.com/containers/storage/types/storage_broken.conf
new file mode 100644
index 00000000000..3bca1d97847
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/storage_broken.conf
@@ -0,0 +1,18 @@
+# This file is a TEST configuration file for all tools
+# that use the containers/storage library.
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+foo = "bar"
+
+[storage]
+
+# Default Storage Driver
+driver = ""
+
+# Temporary storage location
+runroot = "/run/containers/test"
+
+[storage.options]
+# Primary Read/Write location of container storage
+graphroot = "/var/lib/containers/storage"
+
diff --git a/vendor/github.com/containers/storage/types/storage_test.conf b/vendor/github.com/containers/storage/types/storage_test.conf
new file mode 100644
index 00000000000..9b682fe159c
--- /dev/null
+++ b/vendor/github.com/containers/storage/types/storage_test.conf
@@ -0,0 +1,35 @@
+# This file is a TEST configuration file for all tools
+# that use the containers/storage library.
+# See man 5 containers-storage.conf for more information
+# The "container storage" table contains all of the server options.
+[storage]
+
+# Default Storage Driver
+driver = ""
+
+# Temporary storage location
+runroot = "$HOME/$UID/containers/storage"
+
+# Primary Read/Write location of container storage
+graphroot = "$HOME/$UID/containers/storage"
+
+# Storage path for rootless users
+#
+rootless_storage_path = "$HOME/$UID/containers/storage"
+
+[storage.options]
+# Storage options to be passed to underlying storage drivers
+
+# AdditionalImageStores is used to pass paths to additional Read/Only image stores
+# Must be comma separated list.
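+# For example (path is illustrative):
+#   additionalimagestores = [ "/var/lib/shared", ]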
+additionalimagestores = [ +] + +[storage.options.overlay] + +# mountopt specifies comma separated list of extra mount options +mountopt = "nodev" + + +[storage.options.thinpool] +# Storage Options for thinpool diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go new file mode 100644 index 00000000000..4dd1a786ede --- /dev/null +++ b/vendor/github.com/containers/storage/types/utils.go @@ -0,0 +1,213 @@ +package types + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/system" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// GetRootlessRuntimeDir returns the runtime directory when running as non root +func GetRootlessRuntimeDir(rootlessUID int) (string, error) { + path, err := getRootlessRuntimeDir(rootlessUID) + if err != nil { + return "", err + } + path = filepath.Join(path, "containers") + if err := os.MkdirAll(path, 0700); err != nil { + return "", errors.Wrapf(err, "unable to make rootless runtime") + } + return path, nil +} + +type rootlessRuntimeDirEnvironment interface { + getProcCommandFile() string + getRunUserDir() string + getTmpPerUserDir() string + + homeDirGetRuntimeDir() (string, error) + systemLstat(string) (*system.StatT, error) + homedirGet() string +} + +type rootlessRuntimeDirEnvironmentImplementation struct { + procCommandFile string + runUserDir string + tmpPerUserDir string +} + +func (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string { + return env.procCommandFile +} +func (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string { + return env.runUserDir +} +func (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string { + return env.tmpPerUserDir +} +func (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) { + return homedir.GetRuntimeDir() +} +func (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) { + return system.Lstat(path) +} +func (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string { + return homedir.Get() +} + +func isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool { + st, err := env.systemLstat(dir) + return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 +} + +// getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing. +// Everyone but the tests this is intended for should only call getRootlessRuntimeDir, never this function. 
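+// The lookup order is: the home-dir runtime dir (XDG_RUNTIME_DIR), then
+// /run/user/$UID when PID 1 is systemd (or its name cannot be read), then a
+// per-user temporary directory, and finally "rundir" under the resolved home.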
+func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, error) {
+ runtimeDir, err := env.homeDirGetRuntimeDir()
+ if err == nil {
+ return runtimeDir, nil
+ }
+
+ initCommand, err := ioutil.ReadFile(env.getProcCommandFile())
+ // /proc/1/comm ends with a trailing newline, so trim it before comparing
+ if err != nil || strings.TrimSuffix(string(initCommand), "\n") == "systemd" {
+ runUserDir := env.getRunUserDir()
+ if isRootlessRuntimeDirOwner(runUserDir, env) {
+ return runUserDir, nil
+ }
+ }
+
+ tmpPerUserDir := env.getTmpPerUserDir()
+ if tmpPerUserDir != "" {
+ if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) {
+ if err := os.Mkdir(tmpPerUserDir, 0700); err != nil {
+ logrus.Errorf("Failed to create temp directory for user: %v", err)
+ } else {
+ return tmpPerUserDir, nil
+ }
+ } else if isRootlessRuntimeDirOwner(tmpPerUserDir, env) {
+ return tmpPerUserDir, nil
+ }
+ }
+
+ homeDir := env.homedirGet()
+ if homeDir == "" {
+ return "", errors.New("neither XDG_RUNTIME_DIR nor temp dir nor HOME was set non-empty")
+ }
+ resolvedHomeDir, err := filepath.EvalSymlinks(homeDir)
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(resolvedHomeDir, "rundir"), nil
+}
+
+func getRootlessRuntimeDir(rootlessUID int) (string, error) {
+ return getRootlessRuntimeDirIsolated(
+ rootlessRuntimeDirEnvironmentImplementation{
+ "/proc/1/comm",
+ fmt.Sprintf("/run/user/%d", rootlessUID),
+ fmt.Sprintf("%s/containers-user-%d", os.TempDir(), rootlessUID),
+ },
+ )
+}
+
+// getRootlessDirInfo returns the parent path of where the storage for containers and
+// volumes will be in rootless mode
+func getRootlessDirInfo(rootlessUID int) (string, string, error) {
+ rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUID)
+ if err != nil {
+ return "", "", err
+ }
+
+ dataDir, err := homedir.GetDataHome()
+ if err == nil {
+ return dataDir, rootlessRuntime, nil
+ }
+
+ home := homedir.Get()
+ if home == "" {
+ return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
+ }
+ // runc doesn't like symlinks in the rootfs path, and at least
+ // on CoreOS /home is a symlink to /var/home, so resolve any symlink.
+ resolvedHome, err := filepath.EvalSymlinks(home)
+ if err != nil {
+ return "", "", err
+ }
+ dataDir = filepath.Join(resolvedHome, ".local", "share")
+
+ return dataDir, rootlessRuntime, nil
+}
+
+func getRootlessUID() int {
+ uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
+ if uidEnv != "" {
+ u, _ := strconv.Atoi(uidEnv)
+ return u
+ }
+ return os.Geteuid()
+}
+
+func expandEnvPath(path string, rootlessUID int) (string, error) {
+ var err error
+ path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1)
+ path = os.ExpandEnv(path)
+ newpath, err := filepath.EvalSymlinks(path)
+ if err != nil {
+ newpath = filepath.Clean(path)
+ }
+ return newpath, nil
+}
+
+func DefaultConfigFile(rootless bool) (string, error) {
+ if defaultConfigFileSet {
+ return defaultConfigFile, nil
+ }
+
+ if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
+ return path, nil
+ }
+ if !rootless {
+ return defaultConfigFile, nil
+ }
+
+ if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
+ return filepath.Join(configHome, "containers/storage.conf"), nil
+ }
+ home := homedir.Get()
+ if home == "" {
+ return "", errors.New("cannot determine user's homedir")
+ }
+ return filepath.Join(home, ".config/containers/storage.conf"), nil
+}
+
+func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) {
+ prevReloadConfig.mutex.Lock()
+ defer prevReloadConfig.mutex.Unlock()
+
+ fi, err := os.Stat(configFile)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
+ }
+ return
+ }
+
+ mtime := fi.ModTime()
+ if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile {
+ *storeOptions = *prevReloadConfig.storeOptions
+ return
+ }
+
+ ReloadConfigurationFile(configFile, storeOptions)
+
+ prevReloadConfig.storeOptions = storeOptions
+ prevReloadConfig.mod = mtime
+ prevReloadConfig.configFile = configFile
+}
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
new file mode 100644
index 00000000000..523c92dc8b6
--- /dev/null
+++ b/vendor/github.com/containers/storage/userns.go
@@ -0,0 +1,302 @@
+package storage
+
+import (
+ "os"
+ "os/user"
+ "path/filepath"
+ "strconv"
+
+ drivers "github.com/containers/storage/drivers"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/unshare"
+ "github.com/containers/storage/types"
+ libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// getAdditionalSubIDs looks up the additional IDs configured for
+// the specified user.
+// The argument username is ignored for rootless users, as it is not
+// possible to use an arbitrary entry in /etc/sub*id.
+// For root users, by contrast, a default name is used if no
+// username is specified.
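+// (For root, the default is RootAutoUserNsUser, i.e. the "containers" entry
+// in /etc/subuid and /etc/subgid.)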
+func getAdditionalSubIDs(username string) (*idSet, *idSet, error) {
+ var uids, gids *idSet
+
+ if unshare.IsRootless() {
+ username = os.Getenv("USER")
+ if username == "" {
+ var id string
+ if os.Geteuid() == 0 {
+ id = strconv.Itoa(unshare.GetRootlessUID())
+ } else {
+ id = strconv.Itoa(os.Geteuid())
+ }
+ userID, err := user.LookupId(id)
+ if err == nil {
+ username = userID.Username
+ }
+ }
+ } else if username == "" {
+ username = RootAutoUserNsUser
+ }
+ mappings, err := idtools.NewIDMappings(username, username)
+ if err != nil {
+ logrus.Errorf("Cannot find mappings for user %q: %v", username, err)
+ } else {
+ uids = getHostIDs(mappings.UIDs())
+ gids = getHostIDs(mappings.GIDs())
+ }
+ return uids, gids, nil
+}
+
+// getAvailableIDs returns the list of ranges that are usable by the current user.
+// When running as root, it looks up the additional IDs assigned to the specified user.
+// When running as rootless, the mappings assigned to the unprivileged user are converted
+// to the IDs inside of the initial rootless user namespace.
+func (s *store) getAvailableIDs() (*idSet, *idSet, error) {
+ if s.additionalUIDs == nil {
+ uids, gids, err := getAdditionalSubIDs(s.autoUsernsUser)
+ if err != nil {
+ return nil, nil, err
+ }
+ // Store the result so we don't need to look it up again next time
+ s.additionalUIDs, s.additionalGIDs = uids, gids
+ }
+
+ if !unshare.IsRootless() {
+ // No mapping to inner namespace needed
+ return s.additionalUIDs, s.additionalGIDs, nil
+ }
+
+ // We are already inside of the rootless user namespace.
+ // We need to remap the configured mappings to what is available
+ // inside of the rootless userns.
+ u := newIDSet([]interval{{start: 1, end: s.additionalUIDs.size() + 1}})
+ g := newIDSet([]interval{{start: 1, end: s.additionalGIDs.size() + 1}})
+ return u, g, nil
+}
+
+// parseMountedFiles returns the maximum UID and GID found in the /etc/passwd and
+// /etc/group files.
+func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
+ if passwdFile == "" {
+ passwdFile = filepath.Join(containerMount, "etc/passwd")
+ }
+ if groupFile == "" {
+ // default to the file inside the container mount, mirroring passwdFile above
+ groupFile = filepath.Join(containerMount, "etc/group")
+ }
+
+ size := 0
+
+ users, err := libcontainerUser.ParsePasswdFile(passwdFile)
+ if err == nil {
+ for _, u := range users {
+ // Skip the "nobody" user otherwise we end up with 65536
+ // ids with most images
+ if u.Name == "nobody" {
+ continue
+ }
+ if u.Uid > size {
+ size = u.Uid
+ }
+ if u.Gid > size {
+ size = u.Gid
+ }
+ }
+ }
+
+ groups, err := libcontainerUser.ParseGroupFile(groupFile)
+ if err == nil {
+ for _, g := range groups {
+ if g.Name == "nobody" {
+ continue
+ }
+ if g.Gid > size {
+ size = g.Gid
+ }
+ }
+ }
+
+ return uint32(size)
+}
+
+// getMaxSizeFromImage returns the maximum ID used by the specified image.
+// The layer stores must be already locked.
+func (s *store) getMaxSizeFromImage(id string, image *Image, passwdFile, groupFile string) (uint32, error) {
+ lstore, err := s.LayerStore()
+ if err != nil {
+ return 0, err
+ }
+ lstores, err := s.ROLayerStores()
+ if err != nil {
+ return 0, err
+ }
+
+ size := uint32(0)
+
+ var topLayer *Layer
+ layerName := image.TopLayer
+outer:
+ for {
+ for _, ls := range append([]ROLayerStore{lstore}, lstores...)
{
+ layer, err := ls.Get(layerName)
+ if err != nil {
+ continue
+ }
+ if image.TopLayer == layerName {
+ topLayer = layer
+ }
+ for _, uid := range layer.UIDs {
+ if uid >= size {
+ size = uid + 1
+ }
+ }
+ for _, gid := range layer.GIDs {
+ if gid >= size {
+ size = gid + 1
+ }
+ }
+ layerName = layer.Parent
+ if layerName == "" {
+ break outer
+ }
+ continue outer
+ }
+ return 0, errors.Errorf("cannot find layer %q", layerName)
+ }
+
+ rlstore, err := s.LayerStore()
+ if err != nil {
+ return 0, err
+ }
+
+ layerOptions := &LayerOptions{
+ IDMappingOptions: types.IDMappingOptions{
+ HostUIDMapping: true,
+ HostGIDMapping: true,
+ UIDMap: nil,
+ GIDMap: nil,
+ },
+ }
+
+ // We need to create a temporary layer so we can mount it and look up the
+ // maximum IDs used.
+ clayer, err := rlstore.Create(id, topLayer, nil, "", nil, layerOptions, false)
+ if err != nil {
+ return 0, err
+ }
+ defer rlstore.Delete(clayer.ID)
+
+ mountOptions := drivers.MountOpts{
+ MountLabel: "",
+ UidMaps: nil,
+ GidMaps: nil,
+ Options: nil,
+ }
+
+ mountpoint, err := rlstore.Mount(clayer.ID, mountOptions)
+ if err != nil {
+ return 0, err
+ }
+ defer rlstore.Unmount(clayer.ID, true)
+
+ userFilesSize := parseMountedFiles(mountpoint, passwdFile, groupFile)
+ if userFilesSize > size {
+ size = userFilesSize
+ }
+
+ return size, nil
+}
+
+// getAutoUserNS creates an automatic user namespace
+func (s *store) getAutoUserNS(id string, options *types.AutoUserNsOptions, image *Image) ([]idtools.IDMap, []idtools.IDMap, error) {
+ requestedSize := uint32(0)
+ initialSize := uint32(1)
+ if options.Size > 0 {
+ requestedSize = options.Size
+ }
+ if options.InitialSize > 0 {
+ initialSize = options.InitialSize
+ }
+
+ availableUIDs, availableGIDs, err := s.getAvailableIDs()
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "cannot read mappings")
+ }
+
+ // Look at every container that is using a user namespace and record
+ // the intervals that are already used.
+ containers, err := s.Containers()
+ if err != nil {
+ return nil, nil, err
+ }
+ var usedUIDs, usedGIDs []idtools.IDMap
+ for _, c := range containers {
+ usedUIDs = append(usedUIDs, c.UIDMap...)
+ usedGIDs = append(usedGIDs, c.GIDMap...)
+ }
+
+ size := requestedSize
+
+ // If there is no requestedSize, look up the maximum IDs used in the layers
+ // metadata. Make sure the size is at least s.autoNsMinSize and it is not
+ // bigger than s.autoNsMaxSize.
+ // This is a best effort heuristic.
+ if requestedSize == 0 {
+ size = initialSize
+ if s.autoNsMinSize > size {
+ size = s.autoNsMinSize
+ }
+ if image != nil {
+ sizeFromImage, err := s.getMaxSizeFromImage(id, image, options.PasswdFile, options.GroupFile)
+ if err != nil {
+ return nil, nil, err
+ }
+ if sizeFromImage > size {
+ size = sizeFromImage
+ }
+ }
+ if s.autoNsMaxSize > 0 && size > s.autoNsMaxSize {
+ return nil, nil, errors.Errorf("the container needs a user namespace with size %d that is bigger than the maximum value allowed with userns=auto %d", size, s.autoNsMaxSize)
+ }
+ }
+
+ return getAutoUserNSIDMappings(
+ int(size),
+ availableUIDs, availableGIDs,
+ usedUIDs, usedGIDs,
+ options.AdditionalUIDMappings, options.AdditionalGIDMappings,
+ )
+}
+
+// getAutoUserNSIDMappings computes the user/group id mappings for the automatic user namespace.
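+// Ranges in availableUIDs/availableGIDs that overlap the used mappings are
+// discarded, while the caller-requested additional*Mappings are excluded from
+// the automatic allocation and appended to the result verbatim.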
+func getAutoUserNSIDMappings( + size int, + availableUIDs, availableGIDs *idSet, + usedUIDMappings, usedGIDMappings, additionalUIDMappings, additionalGIDMappings []idtools.IDMap, +) ([]idtools.IDMap, []idtools.IDMap, error) { + usedUIDs := getHostIDs(append(usedUIDMappings, additionalUIDMappings...)) + usedGIDs := getHostIDs(append(usedGIDMappings, additionalGIDMappings...)) + + // Exclude additional uids and gids from requested range. + targetIDs := newIDSet([]interval{{start: 0, end: size}}) + requestedContainerUIDs := targetIDs.subtract(getContainerIDs(additionalUIDMappings)) + requestedContainerGIDs := targetIDs.subtract(getContainerIDs(additionalGIDMappings)) + + // Make sure the specified additional IDs are not used as part of the automatic + // mapping + availableUIDs, err := availableUIDs.subtract(usedUIDs).findAvailable(requestedContainerUIDs.size()) + if err != nil { + return nil, nil, err + } + availableGIDs, err = availableGIDs.subtract(usedGIDs).findAvailable(requestedContainerGIDs.size()) + if err != nil { + return nil, nil, err + } + + uidMap := append(availableUIDs.zip(requestedContainerUIDs), additionalUIDMappings...) + gidMap := append(availableGIDs.zip(requestedContainerGIDs), additionalGIDMappings...) + return uidMap, gidMap, nil +} diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go new file mode 100644 index 00000000000..37d4b79b01b --- /dev/null +++ b/vendor/github.com/containers/storage/utils.go @@ -0,0 +1,74 @@ +package storage + +import ( + "fmt" + + "github.com/containers/storage/types" +) + +// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping +func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*types.IDMappingOptions, error) { + return types.ParseIDMapping(UIDMapSlice, GIDMapSlice, subUIDMap, subGIDMap) +} + +// GetRootlessRuntimeDir returns the runtime directory when running as non root +func GetRootlessRuntimeDir(rootlessUID int) (string, error) { + return types.GetRootlessRuntimeDir(rootlessUID) +} + +// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers +func DefaultStoreOptionsAutoDetectUID() (types.StoreOptions, error) { + return types.DefaultStoreOptionsAutoDetectUID() +} + +// DefaultStoreOptions returns the default storage ops for containers +func DefaultStoreOptions(rootless bool, rootlessUID int) (types.StoreOptions, error) { + return types.DefaultStoreOptions(rootless, rootlessUID) +} + +func validateMountOptions(mountOptions []string) error { + var Empty struct{} + // Add invalid options for ImageMount() here. 
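+ // ("rw" is rejected because image mounts are read-only, so a read-write
+ // request cannot be honored.)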
+ invalidOptions := map[string]struct{}{
+ "rw": Empty,
+ }
+
+ for _, opt := range mountOptions {
+ if _, ok := invalidOptions[opt]; ok {
+ return fmt.Errorf("%q option not supported", opt)
+ }
+ }
+ return nil
+}
+
+func applyNameOperation(oldNames []string, opParameters []string, op updateNameOperation) ([]string, error) {
+ var result []string
+ switch op {
+ case setNames:
+ // ignore all old names and just return new names
+ result = opParameters
+ case removeNames:
+ // remove given names from old names
+ result = make([]string, 0, len(oldNames))
+ for _, name := range oldNames {
+ // only keep names in final result which do not intersect with input names
+ // basically `result = oldNames - opParameters`
+ nameShouldBeRemoved := false
+ for _, opName := range opParameters {
+ if name == opName {
+ nameShouldBeRemoved = true
+ }
+ }
+ if !nameShouldBeRemoved {
+ result = append(result, name)
+ }
+ }
+ case addNames:
+ result = make([]string, 0, len(opParameters)+len(oldNames))
+ result = append(result, opParameters...)
+ result = append(result, oldNames...)
+ default:
+ return result, errInvalidUpdateNameOperation
+ }
+ return dedupeNames(result), nil
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
new file mode 100644
index 00000000000..cff5af1a64c
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
@@ -0,0 +1,261 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
+package dbus
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/godbus/dbus/v5"
+)
+
+const (
+ alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
+ num = `0123456789`
+ alphanum = alpha + num
+ signalBuffer = 100
+)
+
+// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
+func needsEscape(i int, b byte) bool {
+ // Escape everything that is not a-z, A-Z, or 0-9
+ // Also escape 0-9 if it's the first character
+ return strings.IndexByte(alphanum, b) == -1 ||
+ (i == 0 && strings.IndexByte(num, b) != -1)
+}
+
+// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
+// rules that systemd uses for serializing special characters.
+func PathBusEscape(path string) string {
+ // Special case the empty string
+ if len(path) == 0 {
+ return "_"
+ }
+ n := []byte{}
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if needsEscape(i, c) {
+ e := fmt.Sprintf("_%x", c)
+ n = append(n, []byte(e)...)
+ } else {
+ n = append(n, c)
+ }
+ }
+ return string(n)
+}
+
+// pathBusUnescape is the inverse of PathBusEscape.
+func pathBusUnescape(path string) string {
+ if path == "_" {
+ return ""
+ }
+ n := []byte{}
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if c == '_' && i+2 < len(path) {
+ res, err := hex.DecodeString(path[i+1 : i+3])
+ if err == nil {
+ n = append(n, res...)
+ } + i += 2 + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. +type Conn struct { + // sysconn/sysobj are only used to call dbus methods + sysconn *dbus.Conn + sysobj dbus.BusObject + + // sigconn/sigobj are only used to receive dbus signals + sigconn *dbus.Conn + sigobj dbus.BusObject + + jobListener struct { + jobs map[dbus.ObjectPath]chan<- string + sync.Mutex + } + subStateSubscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } + propertiesSubscriber struct { + updateCh chan<- *PropertiesUpdate + errCh chan<- error + sync.Mutex + } +} + +// Deprecated: use NewWithContext instead. +func New() (*Conn, error) { + return NewWithContext(context.Background()) +} + +// NewWithContext establishes a connection to any available bus and authenticates. +// Callers should call Close() when done with the connection. +func NewWithContext(ctx context.Context) (*Conn, error) { + conn, err := NewSystemConnectionContext(ctx) + if err != nil && os.Geteuid() == 0 { + return NewSystemdConnectionContext(ctx) + } + return conn, err +} + +// Deprecated: use NewSystemConnectionContext instead. +func NewSystemConnection() (*Conn, error) { + return NewSystemConnectionContext(context.Background()) +} + +// NewSystemConnectionContext establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection. +func NewSystemConnectionContext(ctx context.Context) (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(ctx, dbus.SystemBusPrivate) + }) +} + +// Deprecated: use NewUserConnectionContext instead. +func NewUserConnection() (*Conn, error) { + return NewUserConnectionContext(context.Background()) +} + +// NewUserConnectionContext establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. +// Callers should call Close() when done with the connection. +func NewUserConnectionContext(ctx context.Context) (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(ctx, dbus.SessionBusPrivate) + }) +} + +// Deprecated: use NewSystemdConnectionContext instead. +func NewSystemdConnection() (*Conn, error) { + return NewSystemdConnectionContext(context.Background()) +} + +// NewSystemdConnectionContext establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. +func NewSystemdConnectionContext(ctx context.Context) (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + // We skip Hello when talking directly to systemd. + return dbusAuthConnection(ctx, func(opts ...dbus.ConnOption) (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private", opts...) + }) + }) +} + +// Close closes an established connection. +func (c *Conn) Close() { + c.sysconn.Close() + c.sigconn.Close() +} + +// NewConnection establishes a connection to a bus using a caller-supplied function. +// This allows connecting to remote buses through a user-supplied mechanism. +// The supplied function may be called multiple times, and should return independent connections. +// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, +// and any authentication should be handled by the function. 
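+//
+// For example, a caller could dial a remote bus over TCP (a sketch only; the
+// address is illustrative, and the dial function must handle Auth and Hello
+// itself, as noted above):
+//
+//	conn, err := NewConnection(func() (*dbus.Conn, error) {
+//		bus, err := dbus.Dial("tcp:host=10.0.0.1,port=5555")
+//		if err != nil {
+//			return nil, err
+//		}
+//		if err := bus.Auth(nil); err != nil {
+//			bus.Close()
+//			return nil, err
+//		}
+//		if err := bus.Hello(); err != nil {
+//			bus.Close()
+//			return nil, err
+//		}
+//		return bus, nil
+//	})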
+func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { + sysconn, err := dialBus() + if err != nil { + return nil, err + } + + sigconn, err := dialBus() + if err != nil { + sysconn.Close() + return nil, err + } + + c := &Conn{ + sysconn: sysconn, + sysobj: systemdObject(sysconn), + sigconn: sigconn, + sigobj: systemdObject(sigconn), + } + + c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) + c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + + // Setup the listeners on jobs so that we can get completions + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + + c.dispatch() + return c, nil +} + +// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager +// interface. The value is returned in its string representation, as defined at +// https://developer.gnome.org/glib/unstable/gvariant-text.html. +func (c *Conn) GetManagerProperty(prop string) (string, error) { + variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop) + if err != nil { + return "", err + } + return variant.String(), nil +} + +func dbusAuthConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus(dbus.WithContext(ctx)) + if err != nil { + return nil, err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = conn.Auth(methods) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func dbusAuthHelloConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(ctx, createBus) + if err != nil { + return nil, err + } + + if err = conn.Hello(); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func systemdObject(conn *dbus.Conn) dbus.BusObject { + return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go new file mode 100644 index 00000000000..fa04afc708e --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go @@ -0,0 +1,830 @@ +// Copyright 2015, 2018 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbus + +import ( + "context" + "errors" + "fmt" + "path" + "strconv" + + "github.com/godbus/dbus/v5" +) + +// Who can be used to specify which process to kill in the unit via the KillUnitWithTarget API +type Who string + +const ( + // All sends the signal to all processes in the unit + All Who = "all" + // Main sends the signal to the main process of the unit + Main Who = "main" + // Control sends the signal to the control process of the unit + Control Who = "control" +) + +func (c *Conn) jobComplete(signal *dbus.Signal) { + var id uint32 + var job dbus.ObjectPath + var unit string + var result string + dbus.Store(signal.Body, &id, &job, &unit, &result) + c.jobListener.Lock() + out, ok := c.jobListener.jobs[job] + if ok { + out <- result + delete(c.jobListener.jobs, job) + } + c.jobListener.Unlock() +} + +func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args ...interface{}) (int, error) { + if ch != nil { + c.jobListener.Lock() + defer c.jobListener.Unlock() + } + + var p dbus.ObjectPath + err := c.sysobj.CallWithContext(ctx, job, 0, args...).Store(&p) + if err != nil { + return 0, err + } + + if ch != nil { + c.jobListener.jobs[p] = ch + } + + // ignore error since 0 is fine if conversion fails + jobID, _ := strconv.Atoi(path.Base(string(p))) + + return jobID, nil +} + +// Deprecated: use StartUnitContext instead. +func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.StartUnitContext(context.Background(), name, mode, ch) +} + +// StartUnitContext enqueues a start job and depending jobs, if any (unless otherwise +// specified by the mode string). +// +// Takes the unit to activate, plus a mode string. The mode needs to be one of +// replace, fail, isolate, ignore-dependencies, ignore-requirements. If +// "replace" the call will start the unit and its dependencies, possibly +// replacing already queued jobs that conflict with this. If "fail" the call +// will start the unit and its dependencies, but will fail if this would change +// an already queued job. If "isolate" the call will start the unit in question +// and terminate all units that aren't dependencies of it. If +// "ignore-dependencies" it will start a unit but ignore all its dependencies. +// If "ignore-requirements" it will start a unit but only ignore the +// requirement dependencies. It is not recommended to make use of the latter +// two options. +// +// If the provided channel is non-nil, a result string will be sent to it upon +// job completion: one of done, canceled, timeout, failed, dependency, skipped. +// done indicates successful execution of a job. canceled indicates that a job +// has been canceled before it finished execution. timeout indicates that the +// job timeout was reached. failed indicates that the job failed. dependency +// indicates that a job this job has been depending on failed and the job hence +// has been removed too. skipped indicates that a job was skipped because it +// didn't apply to the units current state. +// +// If no error occurs, the ID of the underlying systemd job will be returned. There +// does exist the possibility for no error to be returned, but for the returned job +// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint +// should not be considered authoritative. +// +// If an error does occur, it will be returned to the user alongside a job ID of 0. 
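+//
+// For example (an illustrative sketch; "foo.service" is a placeholder), a
+// caller can wait on the channel for the job result:
+//
+//	ch := make(chan string, 1)
+//	if _, err := conn.StartUnitContext(ctx, "foo.service", "replace", ch); err != nil {
+//		return err
+//	}
+//	if result := <-ch; result != "done" {
+//		return fmt.Errorf("start failed: %s", result)
+//	}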
+func (c *Conn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
+}
+
+// Deprecated: use StopUnitContext instead.
+func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.StopUnitContext(context.Background(), name, mode, ch)
+}
+
+// StopUnitContext is similar to StartUnitContext, but stops the specified unit
+// rather than starting it.
+func (c *Conn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
+}
+
+// Deprecated: use ReloadUnitContext instead.
+func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.ReloadUnitContext(context.Background(), name, mode, ch)
+}
+
+// ReloadUnitContext reloads a unit. Reloading is done only if the unit
+// is already running, and fails otherwise.
+func (c *Conn) ReloadUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
+}
+
+// Deprecated: use RestartUnitContext instead.
+func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.RestartUnitContext(context.Background(), name, mode, ch)
+}
+
+// RestartUnitContext restarts a service. If the service isn't running, it
+// will be started.
+func (c *Conn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
+}
+
+// Deprecated: use TryRestartUnitContext instead.
+func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.TryRestartUnitContext(context.Background(), name, mode, ch)
+}
+
+// TryRestartUnitContext is like RestartUnitContext, except that a service that
+// isn't running is not affected by the restart.
+func (c *Conn) TryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
+}
+
+// Deprecated: use ReloadOrRestartUnitContext instead.
+func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.ReloadOrRestartUnitContext(context.Background(), name, mode, ch)
+}
+
+// ReloadOrRestartUnitContext attempts a reload if the unit supports it and
+// uses a restart otherwise.
+func (c *Conn) ReloadOrRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
+}
+
+// Deprecated: use ReloadOrTryRestartUnitContext instead.
+func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.ReloadOrTryRestartUnitContext(context.Background(), name, mode, ch)
+}
+
+// ReloadOrTryRestartUnitContext attempts a reload if the unit supports it,
+// and uses a "Try"-flavored restart otherwise.
+func (c *Conn) ReloadOrTryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
+}
+
+// Deprecated: use StartTransientUnitContext instead.
+func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.StartTransientUnitContext(context.Background(), name, mode, properties, ch) +} + +// StartTransientUnitContext may be used to create and start a transient unit, which +// will be released as soon as it is not running or referenced anymore or the +// system is rebooted. name is the unit name including suffix, and must be +// unique. mode is the same as in StartUnitContext, properties contains properties +// of the unit. +func (c *Conn) StartTransientUnitContext(ctx context.Context, name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +} + +// Deprecated: use KillUnitContext instead. +func (c *Conn) KillUnit(name string, signal int32) { + c.KillUnitContext(context.Background(), name, signal) +} + +// KillUnitContext takes the unit name and a UNIX signal number to send. +// All of the unit's processes are killed. +func (c *Conn) KillUnitContext(ctx context.Context, name string, signal int32) { + c.KillUnitWithTarget(ctx, name, All, signal) +} + +// KillUnitWithTarget is like KillUnitContext, but allows you to specify which +// process in the unit to send the signal to. +func (c *Conn) KillUnitWithTarget(ctx context.Context, name string, target Who, signal int32) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.KillUnit", 0, name, string(target), signal).Store() +} + +// Deprecated: use ResetFailedUnitContext instead. +func (c *Conn) ResetFailedUnit(name string) error { + return c.ResetFailedUnitContext(context.Background(), name) +} + +// ResetFailedUnitContext resets the "failed" state of a specific unit. +func (c *Conn) ResetFailedUnitContext(ctx context.Context, name string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() +} + +// Deprecated: use SystemStateContext instead. +func (c *Conn) SystemState() (*Property, error) { + return c.SystemStateContext(context.Background()) +} + +// SystemStateContext returns the systemd state. Equivalent to +// systemctl is-system-running. +func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) { + var err error + var prop dbus.Variant + + obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: "SystemState", Value: prop}, nil +} + +// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface. +func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { + var err error + var props map[string]dbus.Variant + + if !path.IsValid() { + return nil, fmt.Errorf("invalid unit name: %v", path) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) + if err != nil { + return nil, err + } + + out := make(map[string]interface{}, len(props)) + for k, v := range props { + out[k] = v.Value() + } + + return out, nil +} + +// Deprecated: use GetUnitPropertiesContext instead. 
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { + return c.GetUnitPropertiesContext(context.Background(), unit) +} + +// GetUnitPropertiesContext takes the (unescaped) unit name and returns all of +// its dbus object properties. +func (c *Conn) GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") +} + +// Deprecated: use GetUnitPathPropertiesContext instead. +func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { + return c.GetUnitPathPropertiesContext(context.Background(), path) +} + +// GetUnitPathPropertiesContext takes the (escaped) unit path and returns all +// of its dbus object properties. +func (c *Conn) GetUnitPathPropertiesContext(ctx context.Context, path dbus.ObjectPath) (map[string]interface{}, error) { + return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") +} + +// Deprecated: use GetAllPropertiesContext instead. +func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) { + return c.GetAllPropertiesContext(context.Background(), unit) +} + +// GetAllPropertiesContext takes the (unescaped) unit name and returns all of +// its dbus object properties. +func (c *Conn) GetAllPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(ctx, path, "") +} + +func (c *Conn) getProperty(ctx context.Context, unit string, dbusInterface string, propertyName string) (*Property, error) { + var err error + var prop dbus.Variant + + path := unitPath(unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: propertyName, Value: prop}, nil +} + +// Deprecated: use GetUnitPropertyContext instead. +func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { + return c.GetUnitPropertyContext(context.Background(), unit, propertyName) +} + +// GetUnitPropertyContext takes an (unescaped) unit name, and a property name, +// and returns the property value. +func (c *Conn) GetUnitPropertyContext(ctx context.Context, unit string, propertyName string) (*Property, error) { + return c.getProperty(ctx, unit, "org.freedesktop.systemd1.Unit", propertyName) +} + +// Deprecated: use GetServicePropertyContext instead. +func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { + return c.GetServicePropertyContext(context.Background(), service, propertyName) +} + +// GetServiceProperty returns property for given service name and property name. +func (c *Conn) GetServicePropertyContext(ctx context.Context, service string, propertyName string) (*Property, error) { + return c.getProperty(ctx, service, "org.freedesktop.systemd1.Service", propertyName) +} + +// Deprecated: use GetUnitTypePropertiesContext instead. +func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { + return c.GetUnitTypePropertiesContext(context.Background(), unit, unitType) +} + +// GetUnitTypePropertiesContext returns the extra properties for a unit, specific to the unit type. 
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope. +// Returns "dbus.Error: Unknown interface" error if the unitType is not the correct type of the unit. +func (c *Conn) GetUnitTypePropertiesContext(ctx context.Context, unit string, unitType string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(ctx, path, "org.freedesktop.systemd1."+unitType) +} + +// Deprecated: use SetUnitPropertiesContext instead. +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.SetUnitPropertiesContext(context.Background(), name, runtime, properties...) +} + +// SetUnitPropertiesContext may be used to modify certain unit properties at runtime. +// Not all properties may be changed at runtime, but many resource management +// settings (primarily those in systemd.cgroup(5)) may. The changes are applied +// instantly, and stored on disk for future boots, unless runtime is true, in which +// case the settings only apply until the next reboot. name is the name of the unit +// to modify. properties are the settings to set, encoded as an array of property +// name and value pairs. +func (c *Conn) SetUnitPropertiesContext(ctx context.Context, name string, runtime bool, properties ...Property) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() +} + +// Deprecated: use GetUnitTypePropertyContext instead. +func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { + return c.GetUnitTypePropertyContext(context.Background(), unit, unitType, propertyName) +} + +// GetUnitTypePropertyContext takes a property name, a unit name, and a unit type, +// and returns a property value. For valid values of unitType, see GetUnitTypePropertiesContext. +func (c *Conn) GetUnitTypePropertyContext(ctx context.Context, unit string, unitType string, propertyName string) (*Property, error) { + return c.getProperty(ctx, unit, "org.freedesktop.systemd1."+unitType, propertyName) +} + +type UnitStatus struct { + Name string // The primary unit name as string + Description string // The human readable description string + LoadState string // The load state (i.e. whether the unit file has been loaded successfully) + ActiveState string // The active state (i.e. whether the unit is currently started or not) + SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) + Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. + Path dbus.ObjectPath // The unit object path + JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise + JobType string // The job type as string + JobPath dbus.ObjectPath // The job object path +} + +type storeFunc func(retvalues ...interface{}) error + +func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]UnitStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + err = dbus.Store(resultInterface, statusInterface...) 
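+ // dbus.Store fans the raw [][]interface{} reply out into the UnitStatus
+ // structs via the pointer slice built above, converting each dbus value
+ // to the matching Go field.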
+ if err != nil { + return nil, err + } + + return status, nil +} + +// Deprecated: use ListUnitsContext instead. +func (c *Conn) ListUnits() ([]UnitStatus, error) { + return c.ListUnitsContext(context.Background()) +} + +// ListUnitsContext returns an array with all currently loaded units. Note that +// units may be known by multiple names at the same time, and hence there might +// be more unit names loaded than actual units behind them. +// Also note that a unit is only loaded if it is active and/or enabled. +// Units that are both disabled and inactive will thus not be returned. +func (c *Conn) ListUnitsContext(ctx context.Context) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnits", 0).Store) +} + +// Deprecated: use ListUnitsFilteredContext instead. +func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { + return c.ListUnitsFilteredContext(context.Background(), states) +} + +// ListUnitsFilteredContext returns an array with units filtered by state. +// It takes a list of units' statuses to filter. +func (c *Conn) ListUnitsFilteredContext(ctx context.Context, states []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) +} + +// Deprecated: use ListUnitsByPatternsContext instead. +func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { + return c.ListUnitsByPatternsContext(context.Background(), states, patterns) +} + +// ListUnitsByPatternsContext returns an array with units. +// It takes a list of units' statuses and names to filter. +// Note that units may be known by multiple names at the same time, +// and hence there might be more unit names loaded than actual units behind them. +func (c *Conn) ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) +} + +// Deprecated: use ListUnitsByNamesContext instead. +func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { + return c.ListUnitsByNamesContext(context.Background(), units) +} + +// ListUnitsByNamesContext returns an array with units. It takes a list of units' +// names and returns an UnitStatus array. Comparing to ListUnitsByPatternsContext +// method, this method returns statuses even for inactive or non-existing +// units. Input array should contain exact unit names, but not patterns. +// +// Requires systemd v230 or higher. +func (c *Conn) ListUnitsByNamesContext(ctx context.Context, units []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) +} + +type UnitFile struct { + Path string + Type string +} + +func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + files := make([]UnitFile, len(result)) + fileInterface := make([]interface{}, len(files)) + for i := range files { + fileInterface[i] = &files[i] + } + + err = dbus.Store(resultInterface, fileInterface...) 
+ if err != nil {
+ return nil, err
+ }
+
+ return files, nil
+}
+
+// Deprecated: use ListUnitFilesContext instead.
+func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
+ return c.ListUnitFilesContext(context.Background())
+}
+
+// ListUnitFilesContext returns an array of all available unit files on disk.
+func (c *Conn) ListUnitFilesContext(ctx context.Context) ([]UnitFile, error) {
+ return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
+}
+
+// Deprecated: use ListUnitFilesByPatternsContext instead.
+func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) {
+ return c.ListUnitFilesByPatternsContext(context.Background(), states, patterns)
+}
+
+// ListUnitFilesByPatternsContext returns an array of all available unit files on disk that match the given patterns.
+func (c *Conn) ListUnitFilesByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitFile, error) {
+ return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
+}
+
+type LinkUnitFileChange EnableUnitFileChange
+
+// Deprecated: use LinkUnitFilesContext instead.
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+ return c.LinkUnitFilesContext(context.Background(), files, runtime, force)
+}
+
+// LinkUnitFilesContext links unit files (that are located outside of the
+// usual unit search paths) into the unit search path.
+//
+// It takes a list of absolute paths to unit files to link and two
+// booleans.
+//
+// The first boolean controls whether the unit shall be
+// enabled for runtime only (true, /run), or persistently (false,
+// /etc).
+//
+// The second controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns a list of the changes made. The list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]LinkUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+// Deprecated: use EnableUnitFilesContext instead.
+func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+ return c.EnableUnitFilesContext(context.Background(), files, runtime, force)
+}
+
+// EnableUnitFilesContext may be used to enable one or more units in the system
+// (by creating symlinks to them in /etc or /run).
+// +// It takes a list of unit files to enable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and two booleans: the first controls whether the unit shall +// be enabled for runtime only (true, /run), or persistently (false, /etc). +// The second one controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns one boolean and an array with the changes made. The +// boolean signals whether the unit files contained any enablement +// information (i.e. an [Install]) section. The changes list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) EnableUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + var carries_install_info bool + + result := make([][]interface{}, 0) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) + if err != nil { + return false, nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]EnableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return false, nil, err + } + + return carries_install_info, changes, nil +} + +type EnableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Deprecated: use DisableUnitFilesContext instead. +func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { + return c.DisableUnitFilesContext(context.Background(), files, runtime) +} + +// DisableUnitFilesContext may be used to disable one or more units in the +// system (by removing symlinks to them from /etc or /run). +// +// It takes a list of unit files to disable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and one boolean: whether the unit was enabled for runtime +// only (true, /run), or persistently (false, /etc). +// +// This call returns an array with the changes made. The changes list +// consists of structures with three strings: the type of the change (one of +// symlink or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) DisableUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]DisableUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]DisableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) 
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type DisableUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// Deprecated: use MaskUnitFilesContext instead.
+func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
+ return c.MaskUnitFilesContext(context.Background(), files, runtime, force)
+}
+
+// MaskUnitFilesContext masks one or more units in the system.
+//
+// The files argument contains a list of units to mask (either just file names
+// or full absolute paths if the unit files are residing outside the usual unit
+// search paths).
+//
+// The runtime argument is used to specify whether the unit was enabled for
+// runtime only (true, /run/systemd/..), or persistently (false,
+// /etc/systemd/..).
+func (c *Conn) MaskUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]MaskUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type MaskUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// Deprecated: use UnmaskUnitFilesContext instead.
+func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
+ return c.UnmaskUnitFilesContext(context.Background(), files, runtime)
+}
+
+// UnmaskUnitFilesContext unmasks one or more units in the system.
+//
+// It takes the list of unit files to unmask (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit search
+// paths), and a boolean runtime flag to specify whether the unit was enabled
+// for runtime only (true, /run/systemd/..), or persistently (false,
+// /etc/systemd/..).
+func (c *Conn) UnmaskUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]UnmaskUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type UnmaskUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// Deprecated: use ReloadContext instead.
+func (c *Conn) Reload() error { + return c.ReloadContext(context.Background()) +} + +// ReloadContext instructs systemd to scan for and reload unit files. This is +// an equivalent to systemctl daemon-reload. +func (c *Conn) ReloadContext(ctx context.Context) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.Reload", 0).Store() +} + +func unitPath(name string) dbus.ObjectPath { + return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) +} + +// unitName returns the unescaped base element of the supplied escaped path. +func unitName(dpath dbus.ObjectPath) string { + return pathBusUnescape(path.Base(string(dpath))) +} + +// JobStatus holds a currently queued job definition. +type JobStatus struct { + Id uint32 // The numeric job id + Unit string // The primary unit name for this job + JobType string // The job type as string + Status string // The job state as string + JobPath dbus.ObjectPath // The job object path + UnitPath dbus.ObjectPath // The unit object path +} + +// Deprecated: use ListJobsContext instead. +func (c *Conn) ListJobs() ([]JobStatus, error) { + return c.ListJobsContext(context.Background()) +} + +// ListJobsContext returns an array with all currently queued jobs. +func (c *Conn) ListJobsContext(ctx context.Context) ([]JobStatus, error) { + return c.listJobsInternal(ctx) +} + +func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) { + result := make([][]interface{}, 0) + if err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListJobs", 0).Store(&result); err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]JobStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + if err := dbus.Store(resultInterface, statusInterface...); err != nil { + return nil, err + } + + return status, nil +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go b/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go new file mode 100644 index 00000000000..fb42b627338 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go @@ -0,0 +1,237 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "github.com/godbus/dbus/v5" +) + +// From the systemd docs: +// +// The properties array of StartTransientUnit() may take many of the settings +// that may also be configured in unit files. Not all parameters are currently +// accepted though, but we plan to cover more properties with future release. 
+// Currently you may set the Description, Slice and all dependency types of +// units, as well as RemainAfterExit, ExecStart for service units, +// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, +// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, +// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, +// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map +// directly to their counterparts in unit files and as normal D-Bus object +// properties. The exception here is the PIDs field of scope units which is +// used for construction of the scope only and specifies the initial PIDs to +// add to the scope object. + +type Property struct { + Name string + Value dbus.Variant +} + +type PropertyCollection struct { + Name string + Properties []Property +} + +type execStart struct { + Path string // the binary path to execute + Args []string // an array with all arguments to pass to the executed command, starting with argument 0 + UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly +} + +// PropExecStart sets the ExecStart service property. The first argument is a +// slice with the binary path to execute followed by the arguments to pass to +// the executed command. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= +func PropExecStart(command []string, uncleanIsFailure bool) Property { + execStarts := []execStart{ + { + Path: command[0], + Args: command, + UncleanIsFailure: uncleanIsFailure, + }, + } + + return Property{ + Name: "ExecStart", + Value: dbus.MakeVariant(execStarts), + } +} + +// PropRemainAfterExit sets the RemainAfterExit service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= +func PropRemainAfterExit(b bool) Property { + return Property{ + Name: "RemainAfterExit", + Value: dbus.MakeVariant(b), + } +} + +// PropType sets the Type service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type= +func PropType(t string) Property { + return Property{ + Name: "Type", + Value: dbus.MakeVariant(t), + } +} + +// PropDescription sets the Description unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= +func PropDescription(desc string) Property { + return Property{ + Name: "Description", + Value: dbus.MakeVariant(desc), + } +} + +func propDependency(name string, units []string) Property { + return Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + +// PropRequires sets the Requires unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= +func PropRequires(units ...string) Property { + return propDependency("Requires", units) +} + +// PropRequiresOverridable sets the RequiresOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= +func PropRequiresOverridable(units ...string) Property { + return propDependency("RequiresOverridable", units) +} + +// PropRequisite sets the Requisite unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= +func PropRequisite(units ...string) Property { + return propDependency("Requisite", units) +} + +// PropRequisiteOverridable sets the RequisiteOverridable unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= +func PropRequisiteOverridable(units ...string) Property { + return propDependency("RequisiteOverridable", units) +} + +// PropWants sets the Wants unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= +func PropWants(units ...string) Property { + return propDependency("Wants", units) +} + +// PropBindsTo sets the BindsTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= +func PropBindsTo(units ...string) Property { + return propDependency("BindsTo", units) +} + +// PropRequiredBy sets the RequiredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= +func PropRequiredBy(units ...string) Property { + return propDependency("RequiredBy", units) +} + +// PropRequiredByOverridable sets the RequiredByOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= +func PropRequiredByOverridable(units ...string) Property { + return propDependency("RequiredByOverridable", units) +} + +// PropWantedBy sets the WantedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= +func PropWantedBy(units ...string) Property { + return propDependency("WantedBy", units) +} + +// PropBoundBy sets the BoundBy unit property. See +// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= +func PropBoundBy(units ...string) Property { + return propDependency("BoundBy", units) +} + +// PropConflicts sets the Conflicts unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= +func PropConflicts(units ...string) Property { + return propDependency("Conflicts", units) +} + +// PropConflictedBy sets the ConflictedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= +func PropConflictedBy(units ...string) Property { + return propDependency("ConflictedBy", units) +} + +// PropBefore sets the Before unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= +func PropBefore(units ...string) Property { + return propDependency("Before", units) +} + +// PropAfter sets the After unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= +func PropAfter(units ...string) Property { + return propDependency("After", units) +} + +// PropOnFailure sets the OnFailure unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= +func PropOnFailure(units ...string) Property { + return propDependency("OnFailure", units) +} + +// PropTriggers sets the Triggers unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= +func PropTriggers(units ...string) Property { + return propDependency("Triggers", units) +} + +// PropTriggeredBy sets the TriggeredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= +func PropTriggeredBy(units ...string) Property { + return propDependency("TriggeredBy", units) +} + +// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= +func PropPropagatesReloadTo(units ...string) Property { + return propDependency("PropagatesReloadTo", units) +} + +// PropRequiresMountsFor sets the RequiresMountsFor unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= +func PropRequiresMountsFor(units ...string) Property { + return propDependency("RequiresMountsFor", units) +} + +// PropSlice sets the Slice unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= +func PropSlice(slice string) Property { + return Property{ + Name: "Slice", + Value: dbus.MakeVariant(slice), + } +} + +// PropPids sets the PIDs field of scope units used in the initial construction +// of the scope only and specifies the initial PIDs to add to the scope object. +// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties +func PropPids(pids ...uint32) Property { + return Property{ + Name: "PIDs", + Value: dbus.MakeVariant(pids), + } +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/set.go new file mode 100644 index 00000000000..17c5d485657 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/set.go @@ -0,0 +1,47 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +type set struct { + data map[string]bool +} + +func (s *set) Add(value string) { + s.data[value] = true +} + +func (s *set) Remove(value string) { + delete(s.data, value) +} + +func (s *set) Contains(value string) (exists bool) { + _, exists = s.data[value] + return +} + +func (s *set) Length() int { + return len(s.data) +} + +func (s *set) Values() (values []string) { + for val := range s.data { + values = append(values, val) + } + return +} + +func newSet() *set { + return &set{make(map[string]bool)} +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go new file mode 100644 index 00000000000..7e370fea212 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go @@ -0,0 +1,333 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbus + +import ( + "errors" + "log" + "time" + + "github.com/godbus/dbus/v5" +) + +const ( + cleanIgnoreInterval = int64(10 * time.Second) + ignoreInterval = int64(30 * time.Millisecond) +) + +// Subscribe sets up this connection to subscribe to all systemd dbus events. +// This is required before calling SubscribeUnits. When the connection closes +// systemd will automatically stop sending signals so there is no need to +// explicitly call Unsubscribe(). +func (c *Conn) Subscribe() error { + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") + + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() +} + +// Unsubscribe this connection from systemd dbus events. +func (c *Conn) Unsubscribe() error { + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() +} + +func (c *Conn) dispatch() { + ch := make(chan *dbus.Signal, signalBuffer) + + c.sigconn.Signal(ch) + + go func() { + for { + signal, ok := <-ch + if !ok { + return + } + + if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { + c.jobComplete(signal) + } + + if c.subStateSubscriber.updateCh == nil && + c.propertiesSubscriber.updateCh == nil { + continue + } + + var unitPath dbus.ObjectPath + switch signal.Name { + case "org.freedesktop.systemd1.Manager.JobRemoved": + unitName := signal.Body[2].(string) + c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + case "org.freedesktop.systemd1.Manager.UnitNew": + unitPath = signal.Body[1].(dbus.ObjectPath) + case "org.freedesktop.DBus.Properties.PropertiesChanged": + if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { + unitPath = signal.Path + + if len(signal.Body) >= 2 { + if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok { + c.sendPropertiesUpdate(unitPath, changed) + } + } + } + } + + if unitPath == dbus.ObjectPath("") { + continue + } + + c.sendSubStateUpdate(unitPath) + } + }() +} + +// SubscribeUnits returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. +func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) +} + +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// size of the channels, the comparison function for detecting changes and a filter +// function for cutting down on the noise that your channel receives. 
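+//
+// For example (an illustrative sketch; "foo.service" is a placeholder), to
+// watch a single unit, filter out every other name:
+//
+//	statusCh, errCh := conn.SubscribeUnitsCustom(
+//		5*time.Second, 0,
+//		func(u1, u2 *UnitStatus) bool { return *u1 != *u2 },
+//		func(unit string) bool { return unit != "foo.service" },
+//	)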
+func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { + old := make(map[string]*UnitStatus) + statusChan := make(chan map[string]*UnitStatus, buffer) + errChan := make(chan error, buffer) + + go func() { + for { + timerChan := time.After(interval) + + units, err := c.ListUnits() + if err == nil { + cur := make(map[string]*UnitStatus) + for i := range units { + if filterUnit != nil && filterUnit(units[i].Name) { + continue + } + cur[units[i].Name] = &units[i] + } + + // add all new or changed units + changed := make(map[string]*UnitStatus) + for n, u := range cur { + if oldU, ok := old[n]; !ok || isChanged(oldU, u) { + changed[n] = u + } + delete(old, n) + } + + // add all deleted units + for oldN := range old { + changed[oldN] = nil + } + + old = cur + + if len(changed) != 0 { + statusChan <- changed + } + } else { + errChan <- err + } + + <-timerChan + } + }() + + return statusChan, errChan +} + +type SubStateUpdate struct { + UnitName string + SubState string +} + +// SetSubStateSubscriber writes to updateCh when any unit's substate changes. +// Although this writes to updateCh on every state change, the reported state +// may be more recent than the change that generated it (due to an unavoidable +// race in the systemd dbus interface). That is, this method provides a good +// way to keep a current view of all units' states, but is not guaranteed to +// show every state transition they go through. Furthermore, state changes +// will only be written to the channel with non-blocking writes. If updateCh +// is full, it attempts to write an error to errCh; if errCh is full, the error +// passes silently. +func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { + if c == nil { + msg := "nil receiver" + select { + case errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } + + c.subStateSubscriber.Lock() + defer c.subStateSubscriber.Unlock() + c.subStateSubscriber.updateCh = updateCh + c.subStateSubscriber.errCh = errCh +} + +func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) { + c.subStateSubscriber.Lock() + defer c.subStateSubscriber.Unlock() + + if c.subStateSubscriber.updateCh == nil { + return + } + + isIgnored := c.shouldIgnore(unitPath) + defer c.cleanIgnore() + if isIgnored { + return + } + + info, err := c.GetUnitPathProperties(unitPath) + if err != nil { + select { + case c.subStateSubscriber.errCh <- err: + default: + log.Printf("full error channel while reporting: %s\n", err) + } + return + } + defer c.updateIgnore(unitPath, info) + + name, ok := info["Id"].(string) + if !ok { + msg := "failed to cast info.Id" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", err) + } + return + } + substate, ok := info["SubState"].(string) + if !ok { + msg := "failed to cast info.SubState" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } + + update := &SubStateUpdate{name, substate} + select { + case c.subStateSubscriber.updateCh <- update: + default: + msg := "update channel is full" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } +} + +// The ignore functions 
+// Requesting the properties of an unloaded unit will cause systemd to send a
+// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
+// properties on UnitNew (as that's the only indication of a new unit coming up
+// for the first time), we would enter an infinite loop if we did not attempt
+// to detect and ignore these spurious signals. The signals themselves are
+// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
+// unloaded unit's signals for a short time after requesting its properties.
+// This means that we will miss e.g. a transient unit being restarted
+// *immediately* upon failure and also a transient unit being started
+// immediately after requesting its status (with systemctl status, for example,
+// because this causes a UnitNew signal to be sent which then causes us to fetch
+// the properties).
+
+func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
+	t, ok := c.subStateSubscriber.ignore[path]
+	return ok && t >= time.Now().UnixNano()
+}
+
+func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
+	loadState, ok := info["LoadState"].(string)
+	if !ok {
+		return
+	}
+
+	// unit is unloaded - it will trigger bad systemd dbus behavior
+	if loadState == "not-found" {
+		c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
+	}
+}
+
+// without this, ignore would grow unboundedly over time
+func (c *Conn) cleanIgnore() {
+	now := time.Now().UnixNano()
+	if c.subStateSubscriber.cleanIgnore < now {
+		c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval
+
+		for p, t := range c.subStateSubscriber.ignore {
+			if t < now {
+				delete(c.subStateSubscriber.ignore, p)
+			}
+		}
+	}
+}
+
+// PropertiesUpdate holds a map of a unit's changed properties.
+type PropertiesUpdate struct {
+	UnitName string
+	Changed  map[string]dbus.Variant
+}
+
+// SetPropertiesSubscriber writes to updateCh when any unit's properties
+// change. Every property change reported by systemd will be sent; that is, no
+// transitions will be "missed" (as they might be with SetSubStateSubscriber).
+// However, state changes will only be written to the channel with non-blocking
+// writes. If updateCh is full, it attempts to write an error to errCh; if
+// errCh is full, the error passes silently.
+func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + c.propertiesSubscriber.updateCh = updateCh + c.propertiesSubscriber.errCh = errCh +} + +// we don't need to worry about shouldIgnore() here because +// sendPropertiesUpdate doesn't call GetProperties() +func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + + if c.propertiesSubscriber.updateCh == nil { + return + } + + update := &PropertiesUpdate{unitName(unitPath), changedProps} + + select { + case c.propertiesSubscriber.updateCh <- update: + default: + msg := "update channel is full" + select { + case c.propertiesSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go new file mode 100644 index 00000000000..5b408d5847a --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go @@ -0,0 +1,57 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "time" +) + +// SubscriptionSet returns a subscription set which is like conn.Subscribe but +// can filter to only return events for a set of units. +type SubscriptionSet struct { + *set + conn *Conn +} + +func (s *SubscriptionSet) filter(unit string) bool { + return !s.Contains(unit) +} + +// Subscribe starts listening for dbus events for all of the units in the set. +// Returns channels identical to conn.SubscribeUnits. +func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { + // TODO: Make fully evented by using systemd 209 with properties changed values + return s.conn.SubscribeUnitsCustom(time.Second, 0, + mismatchUnitStatus, + func(unit string) bool { return s.filter(unit) }, + ) +} + +// NewSubscriptionSet returns a new subscription set. +func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { + return &SubscriptionSet{newSet(), conn} +} + +// mismatchUnitStatus returns true if the provided UnitStatus objects +// are not equivalent. false is returned if the objects are equivalent. +// Only the Name, Description and state-related fields are used in +// the comparison. +func mismatchUnitStatus(u1, u2 *UnitStatus) bool { + return u1.Name != u2.Name || + u1.Description != u2.Description || + u1.LoadState != u2.LoadState || + u1.ActiveState != u2.ActiveState || + u1.SubState != u2.SubState +} diff --git a/vendor/github.com/coreos/go-systemd/v22/internal/dlopen/dlopen.go b/vendor/github.com/coreos/go-systemd/v22/internal/dlopen/dlopen.go new file mode 100644 index 00000000000..23774f612e0 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/internal/dlopen/dlopen.go @@ -0,0 +1,82 @@ +// Copyright 2016 CoreOS, Inc. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package dlopen provides some convenience functions to dlopen a library and
+// get its symbols.
+package dlopen
+
+// #cgo LDFLAGS: -ldl
+// #include <stdlib.h>
+// #include <dlfcn.h>
+import "C"
+import (
+	"errors"
+	"fmt"
+	"unsafe"
+)
+
+var ErrSoNotFound = errors.New("unable to open a handle to the library")
+
+// LibHandle represents an open handle to a library (.so)
+type LibHandle struct {
+	Handle  unsafe.Pointer
+	Libname string
+}
+
+// GetHandle tries to get a handle to a library (.so), attempting to access it
+// by the names specified in libs and returning the first that is successfully
+// opened. Callers are responsible for closing the handle. If no library can
+// be successfully opened, an error is returned.
+func GetHandle(libs []string) (*LibHandle, error) {
+	for _, name := range libs {
+		libname := C.CString(name)
+		defer C.free(unsafe.Pointer(libname))
+		handle := C.dlopen(libname, C.RTLD_LAZY)
+		if handle != nil {
+			h := &LibHandle{
+				Handle:  handle,
+				Libname: name,
+			}
+			return h, nil
+		}
+	}
+	return nil, ErrSoNotFound
+}
+
+// GetSymbolPointer takes a symbol name and returns a pointer to the symbol.
+func (l *LibHandle) GetSymbolPointer(symbol string) (unsafe.Pointer, error) {
+	sym := C.CString(symbol)
+	defer C.free(unsafe.Pointer(sym))
+
+	C.dlerror()
+	p := C.dlsym(l.Handle, sym)
+	e := C.dlerror()
+	if e != nil {
+		return nil, fmt.Errorf("error resolving symbol %q: %v", symbol, errors.New(C.GoString(e)))
+	}
+
+	return p, nil
+}
+
+// Close closes a LibHandle.
+func (l *LibHandle) Close() error {
+	C.dlerror()
+	C.dlclose(l.Handle)
+	e := C.dlerror()
+	if e != nil {
+		return fmt.Errorf("error closing %v: %v", l.Libname, errors.New(C.GoString(e)))
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/sdjournal/functions.go b/vendor/github.com/coreos/go-systemd/v22/sdjournal/functions.go
new file mode 100644
index 00000000000..3cbd0565883
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/sdjournal/functions.go
@@ -0,0 +1,66 @@
+// Copyright 2015 RedHat, Inc.
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package sdjournal + +import ( + "github.com/coreos/go-systemd/v22/internal/dlopen" + "sync" + "unsafe" +) + +var ( + // lazy initialized + libsystemdHandle *dlopen.LibHandle + + libsystemdMutex = &sync.Mutex{} + libsystemdFunctions = map[string]unsafe.Pointer{} + libsystemdNames = []string{ + // systemd < 209 + "libsystemd-journal.so.0", + "libsystemd-journal.so", + + // systemd >= 209 merged libsystemd-journal into libsystemd proper + "libsystemd.so.0", + "libsystemd.so", + } +) + +func getFunction(name string) (unsafe.Pointer, error) { + libsystemdMutex.Lock() + defer libsystemdMutex.Unlock() + + if libsystemdHandle == nil { + h, err := dlopen.GetHandle(libsystemdNames) + if err != nil { + return nil, err + } + + libsystemdHandle = h + } + + f, ok := libsystemdFunctions[name] + if !ok { + var err error + f, err = libsystemdHandle.GetSymbolPointer(name) + if err != nil { + return nil, err + } + + libsystemdFunctions[name] = f + } + + return f, nil +} diff --git a/vendor/github.com/coreos/go-systemd/v22/sdjournal/journal.go b/vendor/github.com/coreos/go-systemd/v22/sdjournal/journal.go new file mode 100644 index 00000000000..fb11b1179c8 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/sdjournal/journal.go @@ -0,0 +1,1169 @@ +// Copyright 2015 RedHat, Inc. +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sdjournal provides a low-level Go interface to the +// systemd journal wrapped around the sd-journal C API. +// +// All public read methods map closely to the sd-journal API functions. See the +// sd-journal.h documentation[1] for information about each function. 
+//
+// To write to the journal, see the pure-Go "journal" package
+//
+// [1] http://www.freedesktop.org/software/systemd/man/sd-journal.html
+package sdjournal
+
+// #include <systemd/sd-journal.h>
+// #include <systemd/sd-id128.h>
+// #include <stdlib.h>
+// #include <syslog.h>
+//
+// int
+// my_sd_journal_open(void *f, sd_journal **ret, int flags)
+// {
+//	int (*sd_journal_open)(sd_journal **, int);
+//
+//	sd_journal_open = f;
+//	return sd_journal_open(ret, flags);
+// }
+//
+// int
+// my_sd_journal_open_directory(void *f, sd_journal **ret, const char *path, int flags)
+// {
+//	int (*sd_journal_open_directory)(sd_journal **, const char *, int);
+//
+//	sd_journal_open_directory = f;
+//	return sd_journal_open_directory(ret, path, flags);
+// }
+//
+// int
+// my_sd_journal_open_files(void *f, sd_journal **ret, const char **paths, int flags)
+// {
+//	int (*sd_journal_open_files)(sd_journal **, const char **, int);
+//
+//	sd_journal_open_files = f;
+//	return sd_journal_open_files(ret, paths, flags);
+// }
+//
+// void
+// my_sd_journal_close(void *f, sd_journal *j)
+// {
+//	int (*sd_journal_close)(sd_journal *);
+//
+//	sd_journal_close = f;
+//	sd_journal_close(j);
+// }
+//
+// int
+// my_sd_journal_get_usage(void *f, sd_journal *j, uint64_t *bytes)
+// {
+//	int (*sd_journal_get_usage)(sd_journal *, uint64_t *);
+//
+//	sd_journal_get_usage = f;
+//	return sd_journal_get_usage(j, bytes);
+// }
+//
+// int
+// my_sd_journal_add_match(void *f, sd_journal *j, const void *data, size_t size)
+// {
+//	int (*sd_journal_add_match)(sd_journal *, const void *, size_t);
+//
+//	sd_journal_add_match = f;
+//	return sd_journal_add_match(j, data, size);
+// }
+//
+// int
+// my_sd_journal_add_disjunction(void *f, sd_journal *j)
+// {
+//	int (*sd_journal_add_disjunction)(sd_journal *);
+//
+//	sd_journal_add_disjunction = f;
+//	return sd_journal_add_disjunction(j);
+// }
+//
+// int
+// my_sd_journal_add_conjunction(void *f, sd_journal *j)
+// {
+//	int (*sd_journal_add_conjunction)(sd_journal *);
+//
+//	sd_journal_add_conjunction = f;
+//	return sd_journal_add_conjunction(j);
+// }
+//
+// void
+// my_sd_journal_flush_matches(void *f, sd_journal *j)
+// {
+//	int (*sd_journal_flush_matches)(sd_journal *);
+//
+//	sd_journal_flush_matches = f;
+//	sd_journal_flush_matches(j);
+// }
+//
+// int
+// my_sd_journal_next(void *f, sd_journal *j)
+// {
+//	int (*sd_journal_next)(sd_journal *);
+//
+//	sd_journal_next = f;
+//	return sd_journal_next(j);
+// }
+//
+// int
+// my_sd_journal_next_skip(void *f, sd_journal *j, uint64_t skip)
+// {
+//	int (*sd_journal_next_skip)(sd_journal *, uint64_t);
+//
+//	sd_journal_next_skip = f;
+//	return sd_journal_next_skip(j, skip);
+// }
+//
+// int
+// my_sd_journal_previous(void *f, sd_journal *j)
+// {
+//	int (*sd_journal_previous)(sd_journal *);
+//
+//	sd_journal_previous = f;
+//	return sd_journal_previous(j);
+// }
+//
+// int
+// my_sd_journal_previous_skip(void *f, sd_journal *j, uint64_t skip)
+// {
+//	int (*sd_journal_previous_skip)(sd_journal *, uint64_t);
+//
+//	sd_journal_previous_skip = f;
+//	return sd_journal_previous_skip(j, skip);
+// }
+//
+// int
+// my_sd_journal_get_data(void *f, sd_journal *j, const char *field, const void **data, size_t *length)
+// {
+//	int (*sd_journal_get_data)(sd_journal *, const char *, const void **, size_t *);
+//
+//	sd_journal_get_data = f;
+//	return sd_journal_get_data(j, field, data, length);
+// }
+//
+// int
+// my_sd_journal_set_data_threshold(void *f, sd_journal *j, size_t sz)
+// {
+//	int (*sd_journal_set_data_threshold)(sd_journal *, size_t);
+//
+//	
sd_journal_set_data_threshold = f; +// return sd_journal_set_data_threshold(j, sz); +// } +// +// int +// my_sd_journal_get_cursor(void *f, sd_journal *j, char **cursor) +// { +// int (*sd_journal_get_cursor)(sd_journal *, char **); +// +// sd_journal_get_cursor = f; +// return sd_journal_get_cursor(j, cursor); +// } +// +// int +// my_sd_journal_test_cursor(void *f, sd_journal *j, const char *cursor) +// { +// int (*sd_journal_test_cursor)(sd_journal *, const char *); +// +// sd_journal_test_cursor = f; +// return sd_journal_test_cursor(j, cursor); +// } +// +// int +// my_sd_journal_get_realtime_usec(void *f, sd_journal *j, uint64_t *usec) +// { +// int (*sd_journal_get_realtime_usec)(sd_journal *, uint64_t *); +// +// sd_journal_get_realtime_usec = f; +// return sd_journal_get_realtime_usec(j, usec); +// } +// +// int +// my_sd_journal_get_monotonic_usec(void *f, sd_journal *j, uint64_t *usec, sd_id128_t *boot_id) +// { +// int (*sd_journal_get_monotonic_usec)(sd_journal *, uint64_t *, sd_id128_t *); +// +// sd_journal_get_monotonic_usec = f; +// return sd_journal_get_monotonic_usec(j, usec, boot_id); +// } +// +// int +// my_sd_journal_seek_head(void *f, sd_journal *j) +// { +// int (*sd_journal_seek_head)(sd_journal *); +// +// sd_journal_seek_head = f; +// return sd_journal_seek_head(j); +// } +// +// int +// my_sd_journal_seek_tail(void *f, sd_journal *j) +// { +// int (*sd_journal_seek_tail)(sd_journal *); +// +// sd_journal_seek_tail = f; +// return sd_journal_seek_tail(j); +// } +// +// +// int +// my_sd_journal_seek_cursor(void *f, sd_journal *j, const char *cursor) +// { +// int (*sd_journal_seek_cursor)(sd_journal *, const char *); +// +// sd_journal_seek_cursor = f; +// return sd_journal_seek_cursor(j, cursor); +// } +// +// int +// my_sd_journal_seek_realtime_usec(void *f, sd_journal *j, uint64_t usec) +// { +// int (*sd_journal_seek_realtime_usec)(sd_journal *, uint64_t); +// +// sd_journal_seek_realtime_usec = f; +// return sd_journal_seek_realtime_usec(j, usec); +// } +// +// int +// my_sd_journal_wait(void *f, sd_journal *j, uint64_t timeout_usec) +// { +// int (*sd_journal_wait)(sd_journal *, uint64_t); +// +// sd_journal_wait = f; +// return sd_journal_wait(j, timeout_usec); +// } +// +// void +// my_sd_journal_restart_data(void *f, sd_journal *j) +// { +// void (*sd_journal_restart_data)(sd_journal *); +// +// sd_journal_restart_data = f; +// sd_journal_restart_data(j); +// } +// +// int +// my_sd_journal_enumerate_data(void *f, sd_journal *j, const void **data, size_t *length) +// { +// int (*sd_journal_enumerate_data)(sd_journal *, const void **, size_t *); +// +// sd_journal_enumerate_data = f; +// return sd_journal_enumerate_data(j, data, length); +// } +// +// int +// my_sd_journal_query_unique(void *f, sd_journal *j, const char *field) +// { +// int(*sd_journal_query_unique)(sd_journal *, const char *); +// +// sd_journal_query_unique = f; +// return sd_journal_query_unique(j, field); +// } +// +// int +// my_sd_journal_enumerate_unique(void *f, sd_journal *j, const void **data, size_t *length) +// { +// int(*sd_journal_enumerate_unique)(sd_journal *, const void **, size_t *); +// +// sd_journal_enumerate_unique = f; +// return sd_journal_enumerate_unique(j, data, length); +// } +// +// void +// my_sd_journal_restart_unique(void *f, sd_journal *j) +// { +// void(*sd_journal_restart_unique)(sd_journal *); +// +// sd_journal_restart_unique = f; +// sd_journal_restart_unique(j); +// } +// +// int +// my_sd_journal_get_catalog(void *f, sd_journal *j, char **ret) +// 
{ +// int(*sd_journal_get_catalog)(sd_journal *, char **); +// +// sd_journal_get_catalog = f; +// return sd_journal_get_catalog(j, ret); +// } +// +// int +// my_sd_id128_get_boot(void *f, sd_id128_t *boot_id) +// { +// int(*sd_id128_get_boot)(sd_id128_t *); +// +// sd_id128_get_boot = f; +// return sd_id128_get_boot(boot_id); +// } +// +// char * +// my_sd_id128_to_string(void *f, sd_id128_t boot_id, char s[SD_ID128_STRING_MAX]) +// { +// char *(*sd_id128_to_string)(sd_id128_t, char *); +// +// sd_id128_to_string = f; +// return sd_id128_to_string(boot_id, s); +// } +// +import "C" +import ( + "bytes" + "errors" + "fmt" + "strings" + "sync" + "syscall" + "time" + "unsafe" +) + +// Journal entry field strings which correspond to: +// http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html +const ( + // User Journal Fields + SD_JOURNAL_FIELD_MESSAGE = "MESSAGE" + SD_JOURNAL_FIELD_MESSAGE_ID = "MESSAGE_ID" + SD_JOURNAL_FIELD_PRIORITY = "PRIORITY" + SD_JOURNAL_FIELD_CODE_FILE = "CODE_FILE" + SD_JOURNAL_FIELD_CODE_LINE = "CODE_LINE" + SD_JOURNAL_FIELD_CODE_FUNC = "CODE_FUNC" + SD_JOURNAL_FIELD_ERRNO = "ERRNO" + SD_JOURNAL_FIELD_SYSLOG_FACILITY = "SYSLOG_FACILITY" + SD_JOURNAL_FIELD_SYSLOG_IDENTIFIER = "SYSLOG_IDENTIFIER" + SD_JOURNAL_FIELD_SYSLOG_PID = "SYSLOG_PID" + + // Trusted Journal Fields + SD_JOURNAL_FIELD_PID = "_PID" + SD_JOURNAL_FIELD_UID = "_UID" + SD_JOURNAL_FIELD_GID = "_GID" + SD_JOURNAL_FIELD_COMM = "_COMM" + SD_JOURNAL_FIELD_EXE = "_EXE" + SD_JOURNAL_FIELD_CMDLINE = "_CMDLINE" + SD_JOURNAL_FIELD_CAP_EFFECTIVE = "_CAP_EFFECTIVE" + SD_JOURNAL_FIELD_AUDIT_SESSION = "_AUDIT_SESSION" + SD_JOURNAL_FIELD_AUDIT_LOGINUID = "_AUDIT_LOGINUID" + SD_JOURNAL_FIELD_SYSTEMD_CGROUP = "_SYSTEMD_CGROUP" + SD_JOURNAL_FIELD_SYSTEMD_SESSION = "_SYSTEMD_SESSION" + SD_JOURNAL_FIELD_SYSTEMD_UNIT = "_SYSTEMD_UNIT" + SD_JOURNAL_FIELD_SYSTEMD_USER_UNIT = "_SYSTEMD_USER_UNIT" + SD_JOURNAL_FIELD_SYSTEMD_OWNER_UID = "_SYSTEMD_OWNER_UID" + SD_JOURNAL_FIELD_SYSTEMD_SLICE = "_SYSTEMD_SLICE" + SD_JOURNAL_FIELD_SELINUX_CONTEXT = "_SELINUX_CONTEXT" + SD_JOURNAL_FIELD_SOURCE_REALTIME_TIMESTAMP = "_SOURCE_REALTIME_TIMESTAMP" + SD_JOURNAL_FIELD_BOOT_ID = "_BOOT_ID" + SD_JOURNAL_FIELD_MACHINE_ID = "_MACHINE_ID" + SD_JOURNAL_FIELD_HOSTNAME = "_HOSTNAME" + SD_JOURNAL_FIELD_TRANSPORT = "_TRANSPORT" + + // Address Fields + SD_JOURNAL_FIELD_CURSOR = "__CURSOR" + SD_JOURNAL_FIELD_REALTIME_TIMESTAMP = "__REALTIME_TIMESTAMP" + SD_JOURNAL_FIELD_MONOTONIC_TIMESTAMP = "__MONOTONIC_TIMESTAMP" +) + +// Journal event constants +const ( + SD_JOURNAL_NOP = int(C.SD_JOURNAL_NOP) + SD_JOURNAL_APPEND = int(C.SD_JOURNAL_APPEND) + SD_JOURNAL_INVALIDATE = int(C.SD_JOURNAL_INVALIDATE) +) + +const ( + // IndefiniteWait is a sentinel value that can be passed to + // sdjournal.Wait() to signal an indefinite wait for new journal + // events. It is implemented as the maximum value for a time.Duration: + // https://github.com/golang/go/blob/e4dcf5c8c22d98ac9eac7b9b226596229624cb1d/src/time/time.go#L434 + IndefiniteWait time.Duration = 1<<63 - 1 +) + +var ( + // ErrNoTestCursor gets returned when using TestCursor function and cursor + // parameter is not the same as the current cursor position. + ErrNoTestCursor = errors.New("Cursor parameter is not the same as current position") +) + +// Journal is a Go wrapper of an sd_journal structure. +type Journal struct { + cjournal *C.sd_journal + mu sync.Mutex +} + +// JournalEntry represents all fields of a journal entry plus address fields. 
+type JournalEntry struct {
+	Fields             map[string]string
+	Cursor             string
+	RealtimeTimestamp  uint64
+	MonotonicTimestamp uint64
+}
+
+// Match is a convenience wrapper to describe filters supplied to AddMatch.
+type Match struct {
+	Field string
+	Value string
+}
+
+// String returns a string representation of a Match suitable for use with AddMatch.
+func (m *Match) String() string {
+	return m.Field + "=" + m.Value
+}
+
+// NewJournal returns a new Journal instance pointing to the local journal.
+func NewJournal() (j *Journal, err error) {
+	j = &Journal{}
+
+	sd_journal_open, err := getFunction("sd_journal_open")
+	if err != nil {
+		return nil, err
+	}
+
+	r := C.my_sd_journal_open(sd_journal_open, &j.cjournal, C.SD_JOURNAL_LOCAL_ONLY)
+
+	if r < 0 {
+		return nil, fmt.Errorf("failed to open journal: %s", syscall.Errno(-r).Error())
+	}
+
+	return j, nil
+}
+
+// NewJournalFromDir returns a new Journal instance pointing to a journal residing
+// in a given directory.
+func NewJournalFromDir(path string) (j *Journal, err error) {
+	j = &Journal{}
+
+	sd_journal_open_directory, err := getFunction("sd_journal_open_directory")
+	if err != nil {
+		return nil, err
+	}
+
+	p := C.CString(path)
+	defer C.free(unsafe.Pointer(p))
+
+	r := C.my_sd_journal_open_directory(sd_journal_open_directory, &j.cjournal, p, 0)
+	if r < 0 {
+		return nil, fmt.Errorf("failed to open journal in directory %q: %s", path, syscall.Errno(-r).Error())
+	}
+
+	return j, nil
+}
+
+// NewJournalFromFiles returns a new Journal instance pointing to journals residing
+// in the given files.
+func NewJournalFromFiles(paths ...string) (j *Journal, err error) {
+	j = &Journal{}
+
+	sd_journal_open_files, err := getFunction("sd_journal_open_files")
+	if err != nil {
+		return nil, err
+	}
+
+	// by making the slice 1 elem too long, we guarantee it'll be null-terminated
+	cPaths := make([]*C.char, len(paths)+1)
+	for idx, path := range paths {
+		p := C.CString(path)
+		cPaths[idx] = p
+		defer C.free(unsafe.Pointer(p))
+	}
+
+	r := C.my_sd_journal_open_files(sd_journal_open_files, &j.cjournal, &cPaths[0], 0)
+	if r < 0 {
+		return nil, fmt.Errorf("failed to open journals in paths %q: %s", paths, syscall.Errno(-r).Error())
+	}
+
+	return j, nil
+}
+
+// Close closes a journal opened with NewJournal.
+func (j *Journal) Close() error {
+	sd_journal_close, err := getFunction("sd_journal_close")
+	if err != nil {
+		return err
+	}
+
+	j.mu.Lock()
+	C.my_sd_journal_close(sd_journal_close, j.cjournal)
+	j.mu.Unlock()
+
+	return nil
+}
+
+// AddMatch adds a match by which to filter the entries of the journal.
+func (j *Journal) AddMatch(match string) error {
+	sd_journal_add_match, err := getFunction("sd_journal_add_match")
+	if err != nil {
+		return err
+	}
+
+	m := C.CString(match)
+	defer C.free(unsafe.Pointer(m))
+
+	j.mu.Lock()
+	r := C.my_sd_journal_add_match(sd_journal_add_match, j.cjournal, unsafe.Pointer(m), C.size_t(len(match)))
+	j.mu.Unlock()
+
+	if r < 0 {
+		return fmt.Errorf("failed to add match: %s", syscall.Errno(-r).Error())
+	}
+
+	return nil
+}
+
+// AddDisjunction inserts a logical OR in the match list.
+func (j *Journal) AddDisjunction() error { + sd_journal_add_disjunction, err := getFunction("sd_journal_add_disjunction") + if err != nil { + return err + } + + j.mu.Lock() + r := C.my_sd_journal_add_disjunction(sd_journal_add_disjunction, j.cjournal) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to add a disjunction in the match list: %s", syscall.Errno(-r).Error()) + } + + return nil +} + +// AddConjunction inserts a logical AND in the match list. +func (j *Journal) AddConjunction() error { + sd_journal_add_conjunction, err := getFunction("sd_journal_add_conjunction") + if err != nil { + return err + } + + j.mu.Lock() + r := C.my_sd_journal_add_conjunction(sd_journal_add_conjunction, j.cjournal) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to add a conjunction in the match list: %s", syscall.Errno(-r).Error()) + } + + return nil +} + +// FlushMatches flushes all matches, disjunctions and conjunctions. +func (j *Journal) FlushMatches() { + sd_journal_flush_matches, err := getFunction("sd_journal_flush_matches") + if err != nil { + return + } + + j.mu.Lock() + C.my_sd_journal_flush_matches(sd_journal_flush_matches, j.cjournal) + j.mu.Unlock() +} + +// Next advances the read pointer into the journal by one entry. +func (j *Journal) Next() (uint64, error) { + sd_journal_next, err := getFunction("sd_journal_next") + if err != nil { + return 0, err + } + + j.mu.Lock() + r := C.my_sd_journal_next(sd_journal_next, j.cjournal) + j.mu.Unlock() + + if r < 0 { + return 0, fmt.Errorf("failed to iterate journal: %s", syscall.Errno(-r).Error()) + } + + return uint64(r), nil +} + +// NextSkip advances the read pointer by multiple entries at once, +// as specified by the skip parameter. +func (j *Journal) NextSkip(skip uint64) (uint64, error) { + sd_journal_next_skip, err := getFunction("sd_journal_next_skip") + if err != nil { + return 0, err + } + + j.mu.Lock() + r := C.my_sd_journal_next_skip(sd_journal_next_skip, j.cjournal, C.uint64_t(skip)) + j.mu.Unlock() + + if r < 0 { + return 0, fmt.Errorf("failed to iterate journal: %s", syscall.Errno(-r).Error()) + } + + return uint64(r), nil +} + +// Previous sets the read pointer into the journal back by one entry. +func (j *Journal) Previous() (uint64, error) { + sd_journal_previous, err := getFunction("sd_journal_previous") + if err != nil { + return 0, err + } + + j.mu.Lock() + r := C.my_sd_journal_previous(sd_journal_previous, j.cjournal) + j.mu.Unlock() + + if r < 0 { + return 0, fmt.Errorf("failed to iterate journal: %s", syscall.Errno(-r).Error()) + } + + return uint64(r), nil +} + +// PreviousSkip sets back the read pointer by multiple entries at once, +// as specified by the skip parameter. 
+func (j *Journal) PreviousSkip(skip uint64) (uint64, error) {
+	sd_journal_previous_skip, err := getFunction("sd_journal_previous_skip")
+	if err != nil {
+		return 0, err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_previous_skip(sd_journal_previous_skip, j.cjournal, C.uint64_t(skip))
+	j.mu.Unlock()
+
+	if r < 0 {
+		return 0, fmt.Errorf("failed to iterate journal: %s", syscall.Errno(-r).Error())
+	}
+
+	return uint64(r), nil
+}
+
+func (j *Journal) getData(field string) (unsafe.Pointer, C.int, error) {
+	sd_journal_get_data, err := getFunction("sd_journal_get_data")
+	if err != nil {
+		return nil, 0, err
+	}
+
+	f := C.CString(field)
+	defer C.free(unsafe.Pointer(f))
+
+	var d unsafe.Pointer
+	var l C.size_t
+
+	j.mu.Lock()
+	r := C.my_sd_journal_get_data(sd_journal_get_data, j.cjournal, f, &d, &l)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return nil, 0, fmt.Errorf("failed to read message: %s", syscall.Errno(-r).Error())
+	}
+
+	return d, C.int(l), nil
+}
+
+// GetData gets the data object associated with a specific field from the
+// journal entry referenced by the last completed Next/Previous function
+// call. To call GetData, you must have first called one of these functions.
+func (j *Journal) GetData(field string) (string, error) {
+	d, l, err := j.getData(field)
+	if err != nil {
+		return "", err
+	}
+
+	return C.GoStringN((*C.char)(d), l), nil
+}
+
+// GetDataValue gets the data object associated with a specific field from the
+// journal entry referenced by the last completed Next/Previous function call,
+// returning only the value of the object. To call GetDataValue, you must first
+// have called one of the Next/Previous functions.
+func (j *Journal) GetDataValue(field string) (string, error) {
+	val, err := j.GetData(field)
+	if err != nil {
+		return "", err
+	}
+
+	return strings.SplitN(val, "=", 2)[1], nil
+}
+
+// GetDataBytes gets the data object associated with a specific field from the
+// journal entry referenced by the last completed Next/Previous function call.
+// To call GetDataBytes, you must first have called one of these functions.
+func (j *Journal) GetDataBytes(field string) ([]byte, error) {
+	d, l, err := j.getData(field)
+	if err != nil {
+		return nil, err
+	}
+
+	return C.GoBytes(d, l), nil
+}
+
+// GetDataValueBytes gets the data object associated with a specific field from the
+// journal entry referenced by the last completed Next/Previous function call,
+// returning only the value of the object. To call GetDataValueBytes, you must first
+// have called one of the Next/Previous functions.
+func (j *Journal) GetDataValueBytes(field string) ([]byte, error) {
+	val, err := j.GetDataBytes(field)
+	if err != nil {
+		return nil, err
+	}
+
+	return bytes.SplitN(val, []byte("="), 2)[1], nil
+}
+
+// GetEntry returns a full representation of the journal entry referenced by the
+// last completed Next/Previous function call, with all key-value pairs of data
+// as well as address fields (cursor, realtime timestamp and monotonic timestamp).
+// To call GetEntry, you must first have called one of the Next/Previous functions.
+func (j *Journal) GetEntry() (*JournalEntry, error) { + sd_journal_get_realtime_usec, err := getFunction("sd_journal_get_realtime_usec") + if err != nil { + return nil, err + } + + sd_journal_get_monotonic_usec, err := getFunction("sd_journal_get_monotonic_usec") + if err != nil { + return nil, err + } + + sd_journal_get_cursor, err := getFunction("sd_journal_get_cursor") + if err != nil { + return nil, err + } + + sd_journal_restart_data, err := getFunction("sd_journal_restart_data") + if err != nil { + return nil, err + } + + sd_journal_enumerate_data, err := getFunction("sd_journal_enumerate_data") + if err != nil { + return nil, err + } + + j.mu.Lock() + defer j.mu.Unlock() + + var r C.int + entry := &JournalEntry{Fields: make(map[string]string)} + + var realtimeUsec C.uint64_t + r = C.my_sd_journal_get_realtime_usec(sd_journal_get_realtime_usec, j.cjournal, &realtimeUsec) + if r < 0 { + return nil, fmt.Errorf("failed to get realtime timestamp: %s", syscall.Errno(-r).Error()) + } + + entry.RealtimeTimestamp = uint64(realtimeUsec) + + var monotonicUsec C.uint64_t + var boot_id C.sd_id128_t + + r = C.my_sd_journal_get_monotonic_usec(sd_journal_get_monotonic_usec, j.cjournal, &monotonicUsec, &boot_id) + if r < 0 { + return nil, fmt.Errorf("failed to get monotonic timestamp: %s", syscall.Errno(-r).Error()) + } + + entry.MonotonicTimestamp = uint64(monotonicUsec) + + var c *C.char + // since the pointer is mutated by sd_journal_get_cursor, need to wait + // until after the call to free the memory + r = C.my_sd_journal_get_cursor(sd_journal_get_cursor, j.cjournal, &c) + defer C.free(unsafe.Pointer(c)) + if r < 0 { + return nil, fmt.Errorf("failed to get cursor: %s", syscall.Errno(-r).Error()) + } + + entry.Cursor = C.GoString(c) + + // Implements the JOURNAL_FOREACH_DATA_RETVAL macro from journal-internal.h + var d unsafe.Pointer + var l C.size_t + C.my_sd_journal_restart_data(sd_journal_restart_data, j.cjournal) + for { + r = C.my_sd_journal_enumerate_data(sd_journal_enumerate_data, j.cjournal, &d, &l) + if r == 0 { + break + } + + if r < 0 { + return nil, fmt.Errorf("failed to read message field: %s", syscall.Errno(-r).Error()) + } + + msg := C.GoStringN((*C.char)(d), C.int(l)) + kv := strings.SplitN(msg, "=", 2) + if len(kv) < 2 { + return nil, fmt.Errorf("failed to parse field") + } + + entry.Fields[kv[0]] = kv[1] + } + + return entry, nil +} + +// SetDataThreshold sets the data field size threshold for data returned by +// GetData. To retrieve the complete data fields this threshold should be +// turned off by setting it to 0, so that the library always returns the +// complete data objects. +func (j *Journal) SetDataThreshold(threshold uint64) error { + sd_journal_set_data_threshold, err := getFunction("sd_journal_set_data_threshold") + if err != nil { + return err + } + + j.mu.Lock() + r := C.my_sd_journal_set_data_threshold(sd_journal_set_data_threshold, j.cjournal, C.size_t(threshold)) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to set data threshold: %s", syscall.Errno(-r).Error()) + } + + return nil +} + +// GetRealtimeUsec gets the realtime (wallclock) timestamp of the journal +// entry referenced by the last completed Next/Previous function call. To +// call GetRealtimeUsec, you must first have called one of the Next/Previous +// functions. 
+func (j *Journal) GetRealtimeUsec() (uint64, error) {
+	var usec C.uint64_t
+
+	sd_journal_get_realtime_usec, err := getFunction("sd_journal_get_realtime_usec")
+	if err != nil {
+		return 0, err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_get_realtime_usec(sd_journal_get_realtime_usec, j.cjournal, &usec)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return 0, fmt.Errorf("failed to get realtime timestamp: %s", syscall.Errno(-r).Error())
+	}
+
+	return uint64(usec), nil
+}
+
+// GetMonotonicUsec gets the monotonic timestamp of the journal entry
+// referenced by the last completed Next/Previous function call. To call
+// GetMonotonicUsec, you must first have called one of the Next/Previous
+// functions.
+func (j *Journal) GetMonotonicUsec() (uint64, error) {
+	var usec C.uint64_t
+	var boot_id C.sd_id128_t
+
+	sd_journal_get_monotonic_usec, err := getFunction("sd_journal_get_monotonic_usec")
+	if err != nil {
+		return 0, err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_get_monotonic_usec(sd_journal_get_monotonic_usec, j.cjournal, &usec, &boot_id)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return 0, fmt.Errorf("failed to get monotonic timestamp: %s", syscall.Errno(-r).Error())
+	}
+
+	return uint64(usec), nil
+}
+
+// GetCursor gets the cursor of the last journal entry referenced by the
+// last completed Next/Previous function call. To call GetCursor, you must
+// first have called one of the Next/Previous functions.
+func (j *Journal) GetCursor() (string, error) {
+	sd_journal_get_cursor, err := getFunction("sd_journal_get_cursor")
+	if err != nil {
+		return "", err
+	}
+
+	var d *C.char
+	// since the pointer is mutated by sd_journal_get_cursor, need to wait
+	// until after the call to free the memory
+
+	j.mu.Lock()
+	r := C.my_sd_journal_get_cursor(sd_journal_get_cursor, j.cjournal, &d)
+	j.mu.Unlock()
+	defer C.free(unsafe.Pointer(d))
+
+	if r < 0 {
+		return "", fmt.Errorf("failed to get cursor: %s", syscall.Errno(-r).Error())
+	}
+
+	cursor := C.GoString(d)
+
+	return cursor, nil
+}
+
+// TestCursor checks whether the current position in the journal matches the
+// specified cursor.
+func (j *Journal) TestCursor(cursor string) error {
+	sd_journal_test_cursor, err := getFunction("sd_journal_test_cursor")
+	if err != nil {
+		return err
+	}
+
+	c := C.CString(cursor)
+	defer C.free(unsafe.Pointer(c))
+
+	j.mu.Lock()
+	r := C.my_sd_journal_test_cursor(sd_journal_test_cursor, j.cjournal, c)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return fmt.Errorf("failed to test to cursor %q: %s", cursor, syscall.Errno(-r).Error())
+	} else if r == 0 {
+		return ErrNoTestCursor
+	}
+
+	return nil
+}
+
+// SeekHead seeks to the beginning of the journal, i.e. the oldest available
+// entry. This call must be followed by a call to Next before any call to
+// Get* will return data about the first element.
+func (j *Journal) SeekHead() error {
+	sd_journal_seek_head, err := getFunction("sd_journal_seek_head")
+	if err != nil {
+		return err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_seek_head(sd_journal_seek_head, j.cjournal)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return fmt.Errorf("failed to seek to head of journal: %s", syscall.Errno(-r).Error())
+	}
+
+	return nil
+}
+
+// SeekTail may be used to seek to the end of the journal, i.e. the most recent
+// available entry. This call must be followed by a call to Previous before any
+// call to Get* will return data about the last element.
+func (j *Journal) SeekTail() error { + sd_journal_seek_tail, err := getFunction("sd_journal_seek_tail") + if err != nil { + return err + } + + j.mu.Lock() + r := C.my_sd_journal_seek_tail(sd_journal_seek_tail, j.cjournal) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to seek to tail of journal: %s", syscall.Errno(-r).Error()) + } + + return nil +} + +// SeekRealtimeUsec seeks to the entry with the specified realtime (wallclock) +// timestamp, i.e. CLOCK_REALTIME. This call must be followed by a call to +// Next/Previous before any call to Get* will return data about the sought entry. +func (j *Journal) SeekRealtimeUsec(usec uint64) error { + sd_journal_seek_realtime_usec, err := getFunction("sd_journal_seek_realtime_usec") + if err != nil { + return err + } + + j.mu.Lock() + r := C.my_sd_journal_seek_realtime_usec(sd_journal_seek_realtime_usec, j.cjournal, C.uint64_t(usec)) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to seek to %d: %s", usec, syscall.Errno(-r).Error()) + } + + return nil +} + +// SeekCursor seeks to a concrete journal cursor. This call must be +// followed by a call to Next/Previous before any call to Get* will return +// data about the sought entry. +func (j *Journal) SeekCursor(cursor string) error { + sd_journal_seek_cursor, err := getFunction("sd_journal_seek_cursor") + if err != nil { + return err + } + + c := C.CString(cursor) + defer C.free(unsafe.Pointer(c)) + + j.mu.Lock() + r := C.my_sd_journal_seek_cursor(sd_journal_seek_cursor, j.cjournal, c) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to seek to cursor %q: %s", cursor, syscall.Errno(-r).Error()) + } + + return nil +} + +// Wait will synchronously wait until the journal gets changed. The maximum time +// this call sleeps may be controlled with the timeout parameter. If +// sdjournal.IndefiniteWait is passed as the timeout parameter, Wait will +// wait indefinitely for a journal change. +func (j *Journal) Wait(timeout time.Duration) int { + var to uint64 + + sd_journal_wait, err := getFunction("sd_journal_wait") + if err != nil { + return -1 + } + + if timeout == IndefiniteWait { + // sd_journal_wait(3) calls for a (uint64_t) -1 to be passed to signify + // indefinite wait, but using a -1 overflows our C.uint64_t, so we use an + // equivalent hex value. + to = 0xffffffffffffffff + } else { + to = uint64(timeout / time.Microsecond) + } + j.mu.Lock() + r := C.my_sd_journal_wait(sd_journal_wait, j.cjournal, C.uint64_t(to)) + j.mu.Unlock() + + return int(r) +} + +// GetUsage returns the journal disk space usage, in bytes. +func (j *Journal) GetUsage() (uint64, error) { + var out C.uint64_t + + sd_journal_get_usage, err := getFunction("sd_journal_get_usage") + if err != nil { + return 0, err + } + + j.mu.Lock() + r := C.my_sd_journal_get_usage(sd_journal_get_usage, j.cjournal, &out) + j.mu.Unlock() + + if r < 0 { + return 0, fmt.Errorf("failed to get journal disk space usage: %s", syscall.Errno(-r).Error()) + } + + return uint64(out), nil +} + +// GetUniqueValues returns all unique values for a given field. 
+func (j *Journal) GetUniqueValues(field string) ([]string, error) {
+	var result []string
+
+	sd_journal_query_unique, err := getFunction("sd_journal_query_unique")
+	if err != nil {
+		return nil, err
+	}
+
+	sd_journal_enumerate_unique, err := getFunction("sd_journal_enumerate_unique")
+	if err != nil {
+		return nil, err
+	}
+
+	sd_journal_restart_unique, err := getFunction("sd_journal_restart_unique")
+	if err != nil {
+		return nil, err
+	}
+
+	j.mu.Lock()
+	defer j.mu.Unlock()
+
+	f := C.CString(field)
+	defer C.free(unsafe.Pointer(f))
+
+	r := C.my_sd_journal_query_unique(sd_journal_query_unique, j.cjournal, f)
+
+	if r < 0 {
+		return nil, fmt.Errorf("failed to query journal: %s", syscall.Errno(-r).Error())
+	}
+
+	// Implements the SD_JOURNAL_FOREACH_UNIQUE macro from sd-journal.h
+	var d unsafe.Pointer
+	var l C.size_t
+	C.my_sd_journal_restart_unique(sd_journal_restart_unique, j.cjournal)
+	for {
+		r = C.my_sd_journal_enumerate_unique(sd_journal_enumerate_unique, j.cjournal, &d, &l)
+		if r == 0 {
+			break
+		}
+
+		if r < 0 {
+			return nil, fmt.Errorf("failed to read message field: %s", syscall.Errno(-r).Error())
+		}
+
+		msg := C.GoStringN((*C.char)(d), C.int(l))
+		kv := strings.SplitN(msg, "=", 2)
+		if len(kv) < 2 {
+			return nil, fmt.Errorf("failed to parse field")
+		}
+
+		result = append(result, kv[1])
+	}
+
+	return result, nil
+}
+
+// GetCatalog retrieves a message catalog entry for the journal entry referenced
+// by the last completed Next/Previous function call. To call GetCatalog, you
+// must first have called one of these functions.
+func (j *Journal) GetCatalog() (string, error) {
+	sd_journal_get_catalog, err := getFunction("sd_journal_get_catalog")
+	if err != nil {
+		return "", err
+	}
+
+	var c *C.char
+
+	j.mu.Lock()
+	r := C.my_sd_journal_get_catalog(sd_journal_get_catalog, j.cjournal, &c)
+	j.mu.Unlock()
+	defer C.free(unsafe.Pointer(c))
+
+	if r < 0 {
+		return "", fmt.Errorf("failed to retrieve catalog entry for current journal entry: %s", syscall.Errno(-r).Error())
+	}
+
+	catalog := C.GoString(c)
+
+	return catalog, nil
+}
+
+// GetBootID gets the systemd boot ID.
+func (j *Journal) GetBootID() (string, error) {
+	sd_id128_get_boot, err := getFunction("sd_id128_get_boot")
+	if err != nil {
+		return "", err
+	}
+
+	var boot_id C.sd_id128_t
+	r := C.my_sd_id128_get_boot(sd_id128_get_boot, &boot_id)
+	if r < 0 {
+		return "", fmt.Errorf("failed to get boot id: %s", syscall.Errno(-r).Error())
+	}
+
+	sd_id128_to_string, err := getFunction("sd_id128_to_string")
+	if err != nil {
+		return "", err
+	}
+
+	id128StringMax := C.size_t(C.SD_ID128_STRING_MAX)
+	c := (*C.char)(C.malloc(id128StringMax))
+	defer C.free(unsafe.Pointer(c))
+	C.my_sd_id128_to_string(sd_id128_to_string, boot_id, c)
+
+	bootID := C.GoString(c)
+	if len(bootID) <= 0 {
+		return "", fmt.Errorf("get boot id %s is not valid", bootID)
+	}
+
+	return bootID, nil
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/sdjournal/read.go b/vendor/github.com/coreos/go-systemd/v22/sdjournal/read.go
new file mode 100644
index 00000000000..51a060fb530
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/sdjournal/read.go
@@ -0,0 +1,272 @@
+// Copyright 2015 RedHat, Inc.
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sdjournal + +import ( + "errors" + "fmt" + "io" + "log" + "strings" + "sync" + "time" +) + +var ( + // ErrExpired gets returned when the Follow function runs into the + // specified timeout. + ErrExpired = errors.New("Timeout expired") +) + +// JournalReaderConfig represents options to drive the behavior of a JournalReader. +type JournalReaderConfig struct { + // The Since, NumFromTail and Cursor options are mutually exclusive and + // determine where the reading begins within the journal. The order in which + // options are written is exactly the order of precedence. + Since time.Duration // start relative to a Duration from now + NumFromTail uint64 // start relative to the tail + Cursor string // start relative to the cursor + + // Show only journal entries whose fields match the supplied values. If + // the array is empty, entries will not be filtered. + Matches []Match + + // If not empty, the journal instance will point to a journal residing + // in this directory. The supplied path may be relative or absolute. + Path string + + // If not nil, Formatter will be used to translate the resulting entries + // into strings. If not set, the default format (timestamp and message field) + // will be used. If Formatter returns an error, Read will stop and return the error. + Formatter func(entry *JournalEntry) (string, error) +} + +// JournalReader is an io.ReadCloser which provides a simple interface for iterating through the +// systemd journal. A JournalReader is not safe for concurrent use by multiple goroutines. +type JournalReader struct { + journal *Journal + msgReader *strings.Reader + formatter func(entry *JournalEntry) (string, error) +} + +// NewJournalReader creates a new JournalReader with configuration options that are similar to the +// systemd journalctl tool's iteration and filtering features. +func NewJournalReader(config JournalReaderConfig) (*JournalReader, error) { + // use simpleMessageFormatter as default formatter. + if config.Formatter == nil { + config.Formatter = simpleMessageFormatter + } + + r := &JournalReader{ + formatter: config.Formatter, + } + + // Open the journal + var err error + if config.Path != "" { + r.journal, err = NewJournalFromDir(config.Path) + } else { + r.journal, err = NewJournal() + } + if err != nil { + return nil, err + } + + // Add any supplied matches + for _, m := range config.Matches { + if err = r.journal.AddMatch(m.String()); err != nil { + return nil, err + } + } + + // Set the start position based on options + if config.Since != 0 { + // Start based on a relative time + start := time.Now().Add(config.Since) + if err := r.journal.SeekRealtimeUsec(uint64(start.UnixNano() / 1000)); err != nil { + return nil, err + } + } else if config.NumFromTail != 0 { + // Start based on a number of lines before the tail + if err := r.journal.SeekTail(); err != nil { + return nil, err + } + + // Move the read pointer into position near the tail. Go one further than + // the option so that the initial cursor advancement positions us at the + // correct starting point. 
+ skip, err := r.journal.PreviousSkip(config.NumFromTail + 1) + if err != nil { + return nil, err + } + // If we skipped fewer lines than expected, we have reached journal start. + // Thus, we seek to head so that next invocation can read the first line. + if skip != config.NumFromTail+1 { + if err := r.journal.SeekHead(); err != nil { + return nil, err + } + } + } else if config.Cursor != "" { + // Start based on a custom cursor + if err := r.journal.SeekCursor(config.Cursor); err != nil { + return nil, err + } + } + + return r, nil +} + +// Read reads entries from the journal. Read follows the Reader interface so +// it must be able to read a specific amount of bytes. Journald on the other +// hand only allows us to read full entries of arbitrary size (without byte +// granularity). JournalReader is therefore internally buffering entries that +// don't fit in the read buffer. Callers should keep calling until 0 and/or an +// error is returned. +func (r *JournalReader) Read(b []byte) (int, error) { + if r.msgReader == nil { + // Advance the journal cursor. It has to be called at least one time + // before reading + c, err := r.journal.Next() + + // An unexpected error + if err != nil { + return 0, err + } + + // EOF detection + if c == 0 { + return 0, io.EOF + } + + entry, err := r.journal.GetEntry() + if err != nil { + return 0, err + } + + // Build a message + msg, err := r.formatter(entry) + if err != nil { + return 0, err + } + r.msgReader = strings.NewReader(msg) + } + + // Copy and return the message + sz, err := r.msgReader.Read(b) + if err == io.EOF { + // The current entry has been fully read. Don't propagate this + // EOF, so the next entry can be read at the next Read() + // iteration. + r.msgReader = nil + return sz, nil + } + if err != nil { + return sz, err + } + if r.msgReader.Len() == 0 { + r.msgReader = nil + } + + return sz, nil +} + +// Close closes the JournalReader's handle to the journal. +func (r *JournalReader) Close() error { + return r.journal.Close() +} + +// Rewind attempts to rewind the JournalReader to the first entry. +func (r *JournalReader) Rewind() error { + r.msgReader = nil + return r.journal.SeekHead() +} + +// Follow synchronously follows the JournalReader, writing each new journal entry to writer. The +// follow will continue until a single time.Time is received on the until channel. +func (r *JournalReader) Follow(until <-chan time.Time, writer io.Writer) error { + + // Process journal entries and events. Entries are flushed until the tail or + // timeout is reached, and then we wait for new events or the timeout. + var msg = make([]byte, 64*1<<(10)) + var waitCh = make(chan int, 1) + var waitGroup sync.WaitGroup + defer waitGroup.Wait() + +process: + for { + c, err := r.Read(msg) + if err != nil && err != io.EOF { + return err + } + + select { + case <-until: + return ErrExpired + default: + } + if c > 0 { + if _, err = writer.Write(msg[:c]); err != nil { + return err + } + continue process + } + + // We're at the tail, so wait for new events or time out. + // Holds journal events to process. Tightly bounded for now unless there's a + // reason to unblock the journal watch routine more quickly. 
+ for { + waitGroup.Add(1) + go func() { + status := r.journal.Wait(100 * time.Millisecond) + waitCh <- status + waitGroup.Done() + }() + + select { + case <-until: + return ErrExpired + case e := <-waitCh: + switch e { + case SD_JOURNAL_NOP: + // the journal did not change since the last invocation + case SD_JOURNAL_APPEND, SD_JOURNAL_INVALIDATE: + continue process + default: + if e < 0 { + return fmt.Errorf("received error event: %d", e) + } + + log.Printf("received unknown event: %d\n", e) + } + } + } + } +} + +// simpleMessageFormatter is the default formatter. +// It returns a string representing the current journal entry in a simple format which +// includes the entry timestamp and MESSAGE field. +func simpleMessageFormatter(entry *JournalEntry) (string, error) { + msg, ok := entry.Fields["MESSAGE"] + if !ok { + return "", fmt.Errorf("no MESSAGE field present in journal entry") + } + + usec := entry.RealtimeTimestamp + timestamp := time.Unix(0, int64(usec)*int64(time.Microsecond)) + + return fmt.Sprintf("%s %s\n", timestamp, msg), nil +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml new file mode 100644 index 00000000000..b94ff8cf92a --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml @@ -0,0 +1,21 @@ +# Copyright (C) 2017 SUSE LLC. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +language: go +go: + - 1.13.x + - 1.16.x + - tip +arch: + - AMD64 + - ppc64le +os: + - linux + - osx + +script: + - go test -cover -v ./... + +notifications: + email: false diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE new file mode 100644 index 00000000000..bec842f294f --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE @@ -0,0 +1,28 @@ +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
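The sdjournal reader vendored above (read.go) is driven through `NewJournalReader` and `Follow`. A minimal sketch of that flow, not part of the vendored code: the unit name `aro-rp.service` and both durations are assumptions for illustration, and running it requires cgo plus a host libsystemd that the package can dlopen at runtime.

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/coreos/go-systemd/v22/sdjournal"
)

func main() {
	// Start ten minutes in the past (Since is relative to now, so it is
	// negative) and keep only entries for one hypothetical unit.
	r, err := sdjournal.NewJournalReader(sdjournal.JournalReaderConfig{
		Since: -10 * time.Minute,
		Matches: []sdjournal.Match{
			{Field: sdjournal.SD_JOURNAL_FIELD_SYSTEMD_UNIT, Value: "aro-rp.service"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	// Follow writes each new entry (timestamp + MESSAGE) to os.Stdout until
	// the until channel fires; ErrExpired signals a clean timeout.
	if err := r.Follow(time.After(30*time.Second), os.Stdout); err != nil && err != sdjournal.ErrExpired {
		log.Fatal(err)
	}
}
```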
diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md
new file mode 100644
index 00000000000..3624617c89b
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/README.md
@@ -0,0 +1,79 @@
+## `filepath-securejoin` ##
+
+[![Build Status](https://travis-ci.org/cyphar/filepath-securejoin.svg?branch=master)](https://travis-ci.org/cyphar/filepath-securejoin)
+
+An implementation of `SecureJoin`, a [candidate for inclusion in the Go
+standard library][go#20126]. The purpose of this function is to be a "secure"
+alternative to `filepath.Join`, and in particular it provides certain
+guarantees that are not provided by `filepath.Join`.
+
+> **NOTE**: This code is *only* safe if you are not at risk of other processes
+> modifying path components after you've used `SecureJoin`. If it is possible
+> for a malicious process to modify path components of the resolved path, then
+> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There
+> are some Linux kernel patches I'm working on which might allow for a better
+> solution.][lwn-obeneath]
+>
+> In addition, with a slightly modified API it might be possible to use
+> `O_PATH` and verify that the opened path is actually the resolved one -- but
+> I have not done that yet. I might add it in the future as a helper function
+> to help users verify the path (we can't just return `/proc/self/fd/<foo>`
+> because that doesn't always work transparently for all users).
+
+This is the function prototype:
+
+```go
+func SecureJoin(root, unsafePath string) (string, error)
+```
+
+This library **guarantees** the following:
+
+* If no error is set, the resulting string **must** be a child path of
+  `root` and will not contain any symlink path components (they will all be
+  expanded).
+
+* When expanding symlinks, all symlink path components **must** be resolved
+  relative to the provided root. In particular, this can be considered a
+  userspace implementation of how `chroot(2)` operates on file paths. Note that
+  these symlinks will **not** be expanded lexically (`filepath.Clean` is not
+  called on the input before processing).
+
+* Non-existent path components are unaffected by `SecureJoin` (similar to
+  `filepath.EvalSymlinks`'s semantics).
+
+* The returned path will always be `filepath.Clean`ed and thus not contain any
+  `..` components.
+
+A (trivial) implementation of this function on GNU/Linux systems could be done
+with the following (note that this requires root privileges and is far more
+opaque than the implementation in this library, and also requires that
+`readlink` is inside the `root` path):
+
+```go
+package securejoin
+
+import (
+	"os/exec"
+	"path/filepath"
+)
+
+func SecureJoin(root, unsafePath string) (string, error) {
+	unsafePath = string(filepath.Separator) + unsafePath
+	cmd := exec.Command("chroot", root,
+		"readlink", "--canonicalize-missing", "--no-newline", unsafePath)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return "", err
+	}
+	expanded := string(output)
+	return filepath.Join(root, expanded), nil
+}
+```
+
+[lwn-obeneath]: https://lwn.net/Articles/767547/
+[go#20126]: https://github.com/golang/go/issues/20126
+
+### License ###
+
+The license of this project is the same as Go, which is a BSD 3-clause license
+available in the `LICENSE` file.
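The README's guarantees are easiest to see with a concrete call. A minimal sketch, where the rootfs path is a hypothetical example: a hostile `..` traversal (or a symlink pointing outside the root) still resolves to a path that is lexically under the root.

```go
package main

import (
	"fmt"
	"log"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// "../../etc/shadow" cannot escape: ".." components are clamped at the
	// root, and any symlinks encountered are resolved as if chroot'ed there.
	p, err := securejoin.SecureJoin("/var/lib/containers/rootfs", "../../etc/shadow")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p) // /var/lib/containers/rootfs/etc/shadow
}
```

`SecureJoinVFS` (join.go, below) is the same entry point with a pluggable filesystem, which is what the `VFS` interface in vfs.go exists to support.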
diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION new file mode 100644 index 00000000000..7179039691c --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -0,0 +1 @@ +0.2.3 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go new file mode 100644 index 00000000000..7dd08dbbdf7 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -0,0 +1,115 @@ +// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package securejoin is an implementation of the hopefully-soon-to-be-included +// SecureJoin helper that is meant to be part of the "path/filepath" package. +// The purpose of this project is to provide a PoC implementation to make the +// SecureJoin proposal (https://github.com/golang/go/issues/20126) more +// tangible. +package securejoin + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + "syscall" +) + +// IsNotExist tells you if err is an error that implies that either the path +// accessed does not exist (or path components don't exist). This is +// effectively a more broad version of os.IsNotExist. +func IsNotExist(err error) bool { + // Check that it's not actually an ENOTDIR, which in some cases is a more + // convoluted case of ENOENT (usually involving weird paths). + return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT) +} + +// SecureJoinVFS joins the two given path components (similar to Join) except +// that the returned path is guaranteed to be scoped inside the provided root +// path (when evaluated). Any symbolic links in the path are evaluated with the +// given root treated as the root of the filesystem, similar to a chroot. The +// filesystem state is evaluated through the given VFS interface (if nil, the +// standard os.* family of functions are used). +// +// Note that the guarantees provided by this function only apply if the path +// components in the returned string are not modified (in other words are not +// replaced with symlinks on the filesystem) after this function has returned. +// Such a symlink race is necessarily out-of-scope of SecureJoin. +func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { + // Use the os.* VFS implementation if none was specified. + if vfs == nil { + vfs = osVFS{} + } + + var path bytes.Buffer + n := 0 + for unsafePath != "" { + if n > 255 { + return "", &os.PathError{Op: "SecureJoin", Path: root + "/" + unsafePath, Err: syscall.ELOOP} + } + + // Next path component, p. + i := strings.IndexRune(unsafePath, filepath.Separator) + var p string + if i == -1 { + p, unsafePath = unsafePath, "" + } else { + p, unsafePath = unsafePath[:i], unsafePath[i+1:] + } + + // Create a cleaned path, using the lexical semantics of /../a, to + // create a "scoped" path component which can safely be joined to fullP + // for evaluation. At this point, path.String() doesn't contain any + // symlink components. + cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p) + if cleanP == string(filepath.Separator) { + path.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + // Figure out whether the path is a symlink. 
+ fi, err := vfs.Lstat(fullP) + if err != nil && !IsNotExist(err) { + return "", err + } + // Treat non-existent path components the same as non-symlinks (we + // can't do any better here). + if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 { + path.WriteString(p) + path.WriteRune(filepath.Separator) + continue + } + + // Only increment when we actually dereference a link. + n++ + + // It's a symlink, expand it by prepending it to the yet-unparsed path. + dest, err := vfs.Readlink(fullP) + if err != nil { + return "", err + } + // Absolute symlinks reset any work we've already done. + if filepath.IsAbs(dest) { + path.Reset() + } + unsafePath = dest + string(filepath.Separator) + unsafePath + } + + // We have to clean path.String() here because it may contain '..' + // components that are entirely lexical, but would be misleading otherwise. + // And finally do a final clean to ensure that root is also lexically + // clean. + fullP := filepath.Clean(string(filepath.Separator) + path.String()) + return filepath.Clean(root + fullP), nil +} + +// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library +// of functions as the VFS. If in doubt, use this function over SecureJoinVFS. +func SecureJoin(root, unsafePath string) (string, error) { + return SecureJoinVFS(root, unsafePath, nil) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go new file mode 100644 index 00000000000..a82a5eae11e --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -0,0 +1,41 @@ +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import "os" + +// In future this should be moved into a separate package, because now there +// are several projects (umoci and go-mtree) that are using this sort of +// interface. + +// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is +// equivalent to using the standard os.* family of functions. This is mainly +// used for the purposes of mock testing, but also can be used to otherwise use +// SecureJoin with VFS-like system. +type VFS interface { + // Lstat returns a FileInfo describing the named file. If the file is a + // symbolic link, the returned FileInfo describes the symbolic link. Lstat + // makes no attempt to follow the link. These semantics are identical to + // os.Lstat. + Lstat(name string) (os.FileInfo, error) + + // Readlink returns the destination of the named symbolic link. These + // semantics are identical to os.Readlink. + Readlink(name string) (string, error) +} + +// osVFS is the "nil" VFS, in that it just passes everything through to the os +// module. +type osVFS struct{} + +// Lstat returns a FileInfo describing the named file. If the file is a +// symbolic link, the returned FileInfo describes the symbolic link. Lstat +// makes no attempt to follow the link. These semantics are identical to +// os.Lstat. +func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } + +// Readlink returns the destination of the named symbolic link. These +// semantics are identical to os.Readlink. 
+func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) } diff --git a/vendor/github.com/disiqueira/gotree/v3/.gitignore b/vendor/github.com/disiqueira/gotree/v3/.gitignore new file mode 100644 index 00000000000..3236c30ab6a --- /dev/null +++ b/vendor/github.com/disiqueira/gotree/v3/.gitignore @@ -0,0 +1,137 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +.idea/ +GoTree.iml +### Linux template +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* +### Windows template +# Windows image file caches +Thumbs.db +ehthumbs.db + +# Folder config file +Desktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msm +*.msp + +# Windows shortcuts +*.lnk +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea/workspace.xml +.idea/tasks.xml +.idea/dictionaries +.idea/vcs.xml +.idea/jsLibraryMappings.xml + +# Sensitive or high-churn files: +.idea/dataSources.ids +.idea/dataSources.xml +.idea/dataSources.local.xml +.idea/sqlDataSources.xml +.idea/dynamic.xml +.idea/uiDesigner.xml + +# Gradle: +.idea/gradle.xml +.idea/libraries + +# Mongo Explorer plugin: +.idea/mongoSettings.xml + +## File-based project format: +*.iws + +## Plugin-specific files: + +# IntelliJ +/out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties +### Go template +# Compiled Object files, Static and Dynamic libs (Shared Objects) + +# Folders + +# Architecture specific extensions/prefixes + + + +### OSX template +*.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk diff --git a/vendor/github.com/disiqueira/gotree/v3/.travis.yml b/vendor/github.com/disiqueira/gotree/v3/.travis.yml new file mode 100644 index 00000000000..29261dfffe1 --- /dev/null +++ b/vendor/github.com/disiqueira/gotree/v3/.travis.yml @@ -0,0 +1,11 @@ +language: go +go_import_path: github.com/disiqueira/gotree +git: + depth: 1 +env: + - GO111MODULE=on + - GO111MODULE=off +go: [ 1.11.x, 1.12.x, 1.13.x ] +os: [ linux, osx ] +script: + - go test -race -v ./... 
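Returning to the `VFS` seam defined in `vfs.go` above: since it exists mainly for mock testing, a hedged sketch of an instrumented implementation (the `countingVFS` name and the paths are illustrative, not part of the library) could look like:

```go
package main

import (
	"fmt"
	"os"

	securejoin "github.com/cyphar/filepath-securejoin"
)

// countingVFS delegates to the os package but records how many symlinks
// SecureJoinVFS dereferences -- handy in tests that assert on traversal.
type countingVFS struct{ readlinks int }

func (v *countingVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }

func (v *countingVFS) Readlink(name string) (string, error) {
	v.readlinks++
	return os.Readlink(name)
}

func main() {
	vfs := &countingVFS{}
	p, err := securejoin.SecureJoinVFS("/tmp", "some/link/target", vfs)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s (resolved %d symlinks)\n", p, vfs.readlinks)
}
```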
diff --git a/vendor/github.com/disiqueira/gotree/v3/LICENSE b/vendor/github.com/disiqueira/gotree/v3/LICENSE
new file mode 100644
index 00000000000..e790b5a5231
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Diego Siqueira
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/disiqueira/gotree/v3/README.md b/vendor/github.com/disiqueira/gotree/v3/README.md
new file mode 100644
index 00000000000..d09d4a98cda
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/README.md
@@ -0,0 +1,104 @@
+# ![GoTree](https://rawgit.com/DiSiqueira/GoTree/master/gotree-logo.png)
+
+# GoTree ![Language Badge](https://img.shields.io/badge/Language-Go-blue.svg) ![Go Report](https://goreportcard.com/badge/github.com/DiSiqueira/GoTree) ![License Badge](https://img.shields.io/badge/License-MIT-blue.svg) ![Status Badge](https://img.shields.io/badge/Status-Beta-brightgreen.svg) [![GoDoc](https://godoc.org/github.com/DiSiqueira/GoTree?status.svg)](https://godoc.org/github.com/DiSiqueira/GoTree) [![Build Status](https://travis-ci.org/DiSiqueira/GoTree.svg?branch=master)](https://travis-ci.org/DiSiqueira/GoTree)
+
+Simple Go module to print tree structures in the terminal. Heavily inspired by [The Tree Command for Linux][treecommand]
+
+GoTree's goal is to be a simple tool providing a stupidly easy-to-use and fast way to print recursive structures.
+
+[treecommand]: http://mama.indstate.edu/users/ice/tree/
+
+## Project Status
+
+GoTree is in beta. Pull Requests [are welcome](https://github.com/DiSiqueira/GoTree#social-coding)
+
+![](http://image.prntscr.com/image/2a0dbf0777454446b8083fb6a0dc51fe.png)
+
+## Features
+
+- Very simple and fast code
+- Intuitive names
+- Easy to extend
+- Uses only native libs
+- STUPIDLY [EASY TO USE](https://github.com/DiSiqueira/GoTree#usage)
+
+## Installation
+
+### Go Get
+
+```bash
+$ go get github.com/disiqueira/gotree
+```
+
+## Usage
+
+### Simple create, populate and print example
+
+![](http://image.prntscr.com/image/dd2fe3737e6543f7b21941a6953598c2.png)
+
+```golang
+package main
+
+import (
+	"fmt"
+
+	"github.com/disiqueira/gotree"
+)
+
+func main() {
+	artist := gotree.New("Pantera")
+	album := artist.Add("Far Beyond Driven")
+	album.Add("5 minutes Alone")
+
+	fmt.Println(artist.Print())
+}
+```
+
+## Contributing
+
+### Bug Reports & Feature Requests
+
+Please use the [issue tracker](https://github.com/DiSiqueira/GoTree/issues) to report any bugs or file feature requests.
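One API note before the contribution details: besides `Add`, the vendored implementation below also exposes `AddTree` for grafting a pre-built subtree, which the usage example above doesn't show. A minimal sketch (labels are illustrative):

```go
package main

import (
	"fmt"

	gotree "github.com/disiqueira/gotree/v3"
)

func main() {
	root := gotree.New("vendor")
	pkg := gotree.New("github.com/disiqueira/gotree/v3")
	pkg.Add("gotree.go")
	pkg.Add("LICENSE")
	root.AddTree(pkg) // graft a pre-built subtree instead of Add(text)

	fmt.Println(root.Print())
	// vendor
	// └── github.com/disiqueira/gotree/v3
	//     ├── gotree.go
	//     └── LICENSE
}
```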
+
+### Developing
+
+PRs are welcome. To begin developing, do this:
+
+```bash
+$ git clone --recursive git@github.com:DiSiqueira/GoTree.git
+$ cd GoTree/
+```
+
+## Social Coding
+
+1. Create an issue to discuss your idea
+2. [Fork it](https://github.com/DiSiqueira/GoTree/fork)
+3. Create your feature branch (`git checkout -b my-new-feature`)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+5. Push to the branch (`git push origin my-new-feature`)
+6. Create a new Pull Request
+7. Profit! :white_check_mark:
+
+## License
+
+The MIT License (MIT)
+
+Copyright (c) 2013-2018 Diego Siqueira
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/disiqueira/gotree/v3/_config.yml b/vendor/github.com/disiqueira/gotree/v3/_config.yml
new file mode 100644
index 00000000000..c7418817439
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/_config.yml
@@ -0,0 +1 @@
+theme: jekyll-theme-slate
\ No newline at end of file
diff --git a/vendor/github.com/disiqueira/gotree/v3/gotree-logo.png b/vendor/github.com/disiqueira/gotree/v3/gotree-logo.png
new file mode 100644
index 00000000000..1735c6008d6
Binary files /dev/null and b/vendor/github.com/disiqueira/gotree/v3/gotree-logo.png differ
diff --git a/vendor/github.com/disiqueira/gotree/v3/gotree.go b/vendor/github.com/disiqueira/gotree/v3/gotree.go
new file mode 100644
index 00000000000..c529f62be0b
--- /dev/null
+++ b/vendor/github.com/disiqueira/gotree/v3/gotree.go
@@ -0,0 +1,129 @@
+// Package gotree creates and prints trees.
+package gotree
+
+import (
+	"strings"
+)
+
+const (
+	newLine      = "\n"
+	emptySpace   = "    "
+	middleItem   = "├── "
+	continueItem = "│   "
+	lastItem     = "└── "
+)
+
+type (
+	tree struct {
+		text  string
+		items []Tree
+	}
+
+	// Tree is tree interface
+	Tree interface {
+		Add(text string) Tree
+		AddTree(tree Tree)
+		Items() []Tree
+		Text() string
+		Print() string
+	}
+
+	printer struct {
+	}
+
+	// Printer is printer interface
+	Printer interface {
+		Print(Tree) string
+	}
+)
+
+// New returns a new GoTree.Tree
+func New(text string) Tree {
+	return &tree{
+		text:  text,
+		items: []Tree{},
+	}
+}
+
+// Add adds a node to the tree
+func (t *tree) Add(text string) Tree {
+	n := New(text)
+	t.items = append(t.items, n)
+	return n
+}
+
+// AddTree adds a tree as an item
+func (t *tree) AddTree(tree Tree) {
+	t.items = append(t.items, tree)
+}
+
+// Text returns the node's value
+func (t *tree) Text() string {
+	return t.text
+}
+
+// Items returns all items in the tree
+func (t *tree) Items() []Tree {
+	return t.items
+}
+
+// Print returns a visual representation of the tree
+func (t *tree) Print() string {
+	return newPrinter().Print(t)
+}
+
+func newPrinter() Printer {
+	return &printer{}
+}
+
+// Print prints a tree to a string
+func (p *printer) Print(t Tree) string {
+	return t.Text() + newLine + p.printItems(t.Items(), []bool{})
+}
+
+func (p *printer) printText(text string, spaces []bool, last bool) string {
+	var result string
+	for _, space := range spaces {
+		if space {
+			result += emptySpace
+		} else {
+			result += continueItem
+		}
+	}
+
+	indicator := middleItem
+	if last {
+		indicator = lastItem
+	}
+
+	var out string
+	lines := strings.Split(text, "\n")
+	for i := range lines {
+		text := lines[i]
+		if i == 0 {
+			out += result + indicator + text + newLine
+			continue
+		}
+		if last {
+			indicator = emptySpace
+		} else {
+			indicator = continueItem
+		}
+		out += result + indicator + text + newLine
+	}
+
+	return out
+}
+
+func (p *printer) printItems(t []Tree, spaces []bool) string {
+	var result string
+	for i, f := range t {
+		last := i == len(t)-1
+		result += p.printText(f.Text(), spaces, last)
+		if len(f.Items()) > 0 {
+			spacesChild := append(spaces, last)
+			result += p.printItems(f.Items(), spacesChild)
+		}
+	}
+	return result
+}
diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md
new file mode 100644
index 00000000000..f136c3433af
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/README.md
@@ -0,0 +1,42 @@
+# Working on the Engine API
+
+The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
+
+It consists of various components in this repository:
+
+- `api/swagger.yaml` A Swagger definition of the API.
+- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
+- `cli/` The command-line client.
+- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
+- `daemon/` The daemon, which serves the API.
+
+## Swagger definition
+
+The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
+
+1. Automatically generate documentation.
+2. Automatically generate the Go server and client. (A work-in-progress.)
+3. Provide a machine-readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
+
+## Updating the API documentation
+
+The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation.
+
+The file is split into two main sections:
+
+- `definitions`, which defines re-usable objects used in requests and responses
+- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
+
+To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
+
+There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
+
+`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing.
+
+## Viewing the API documentation
+
+When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
+
+Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
+
+The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
new file mode 100644
index 00000000000..1565e2af647
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/common.go
@@ -0,0 +1,11 @@
+package api // import "github.com/docker/docker/api"
+
+// Common constants for daemon and client.
+const (
+	// DefaultVersion of Current REST API
+	DefaultVersion = "1.41"
+
+	// NoBaseImageSpecifier is the symbol used by the FROM
+	// command to specify that no base image is to be used.
+	NoBaseImageSpecifier = "scratch"
+)
diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go
new file mode 100644
index 00000000000..19fc63d6589
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/common_unix.go
@@ -0,0 +1,7 @@
+//go:build !windows
+// +build !windows
+
+package api // import "github.com/docker/docker/api"
+
+// MinVersion represents Minimum REST API version supported
+const MinVersion = "1.12"
diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go
new file mode 100644
index 00000000000..590ba5479be
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/common_windows.go
@@ -0,0 +1,8 @@
+package api // import "github.com/docker/docker/api"
+
+// MinVersion represents Minimum REST API version supported
+// Technically the first daemon API version released on Windows is v1.25 in
+// engine version 1.13. However, some clients are explicitly using downlevel
+// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
+// Hence also allowing 1.24 on Windows.
+const MinVersion string = "1.24"
diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml
new file mode 100644
index 00000000000..f07a02737f7
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/swagger-gen.yaml
@@ -0,0 +1,12 @@
+
+layout:
+  models:
+    - name: definition
+      source: asset:model
+      target: "{{ joinFilePath .Target .ModelPackage }}"
+      file_name: "{{ (snakize (pascalize .Name)) }}.go"
+  operations:
+    - name: handler
+      source: asset:serverOperation
+      target: "{{ joinFilePath .Target .APIPackage .Package }}"
+      file_name: "{{ (snakize (pascalize .Name)) }}.go"
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
new file mode 100644
index 00000000000..c24f57bc9a7
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -0,0 +1,11484 @@
+# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API.
+#
+# This is used for generating API documentation and the types used by the
+# client/server. See api/README.md for more information.
+#
+# Some style notes:
+# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
+#   descriptions.
+# - There is no maximum line length, for ease of editing and pretty diffs.
+# - operationIds are in the format "NounVerb", with a singular noun.
+
+swagger: "2.0"
+schemes:
+  - "http"
+  - "https"
+produces:
+  - "application/json"
+  - "text/plain"
+consumes:
+  - "application/json"
+  - "text/plain"
+basePath: "/v1.41"
+info:
+  title: "Docker Engine API"
+  version: "1.41"
+  x-logo:
+    url: "https://docs.docker.com/assets/images/logo-docker-main.png"
+  description: |
+    The Engine API is an HTTP API served by Docker Engine. It is the API the
+    Docker client uses to communicate with the Engine, so everything the Docker
+    client can do can be done with the API.
+
+    Most of the client's commands map directly to API endpoints (e.g. `docker ps`
+    is `GET /containers/json`). The notable exception is running containers,
+    which consists of several API calls.
+
+    # Errors
+
+    The API uses standard HTTP status codes to indicate the success or failure
+    of the API call. The body of the response will be JSON in the following
+    format:
+
+    ```
+    {
+      "message": "page not found"
+    }
+    ```
+
+    # Versioning
+
+    The API is usually changed in each release, so API calls are versioned to
+    ensure that clients don't break. To lock to a specific version of the API,
+    you prefix the URL with its version, for example, call `/v1.30/info` to use
+    the v1.30 version of the `/info` endpoint. If the API version specified in
+    the URL is not supported by the daemon, an HTTP `400 Bad Request` error message
+    is returned.
+
+    If you omit the version-prefix, the current version of the API (v1.41) is used.
+    For example, calling `/info` is the same as calling `/v1.41/info`. Using the
+    API without a version-prefix is deprecated and will be removed in a future release.
+
+    Engine releases in the near future should support this version of the API,
+    so your client will continue to work even if it is talking to a newer Engine.
+
+    The API uses an open schema model, which means the server may add extra properties
+    to responses. Likewise, the server will ignore any extra query parameters and
+    request body properties. When you write clients, you need to ignore additional
+    properties in responses to ensure they do not break when talking to newer
+    daemons.
+
+
+    # Authentication
+
+    Authentication for registries is handled client side.
The client has to send + authentication details to various endpoints that need to communicate with + registries, such as `POST /images/(name)/push`. These are sent as + `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) + (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this + structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), + you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. + - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. 
+ # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: "A mount point inside a container" + properties: + Type: + type: "string" + Name: + type: "string" + Source: + type: "string" + Destination: + type: "string" + Driver: + type: "string" + Mode: + type: "string" + RW: + type: "boolean" + Propagation: + type: "string" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." 
+ type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." 
+ type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). + type: "string" + example: "0-3" + CpusetMems: + description: | + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + effective on NUMA systems. + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: | + A list of requests for devices to be sent to device drivers. + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + KernelMemory: + description: | + Kernel memory limit in bytes. + +
+
+          > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated
+          > `kmem.limit_in_bytes`.
+        type: "integer"
+        format: "int64"
+        example: 209715200
+      KernelMemoryTCP:
+        description: "Hard limit for kernel TCP buffer memory (in bytes)."
+        type: "integer"
+        format: "int64"
+      MemoryReservation:
+        description: "Memory soft limit in bytes."
+        type: "integer"
+        format: "int64"
+      MemorySwap:
+        description: |
+          Total memory limit (memory + swap). Set as `-1` to enable unlimited
+          swap.
+        type: "integer"
+        format: "int64"
+      MemorySwappiness:
+        description: |
+          Tune a container's memory swappiness behavior. Accepts an integer
+          between 0 and 100.
+        type: "integer"
+        format: "int64"
+        minimum: 0
+        maximum: 100
+      NanoCpus:
+        description: "CPU quota in units of 10<sup>-9</sup> CPUs."
+        type: "integer"
+        format: "int64"
+      OomKillDisable:
+        description: "Disable OOM Killer for the container."
+        type: "boolean"
+      Init:
+        description: |
+          Run an init inside the container that forwards signals and reaps
+          processes. This field is omitted if empty, and the default (as
+          configured on the daemon) is used.
+        type: "boolean"
+        x-nullable: true
+      PidsLimit:
+        description: |
+          Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null`
+          to not change.
+        type: "integer"
+        format: "int64"
+        x-nullable: true
+      Ulimits:
+        description: |
+          A list of resource limits to set in the container. For example:
+
+          ```
+          {"Name": "nofile", "Soft": 1024, "Hard": 2048}
+          ```
+        type: "array"
+        items:
+          type: "object"
+          properties:
+            Name:
+              description: "Name of ulimit"
+              type: "string"
+            Soft:
+              description: "Soft limit"
+              type: "integer"
+            Hard:
+              description: "Hard limit"
+              type: "integer"
+      # Applicable to Windows
+      CpuCount:
+        description: |
+          The number of usable CPUs (Windows only).
+
+          On Windows Server containers, the processor resource controls are
+          mutually exclusive. The order of precedence is `CPUCount` first, then
+          `CPUShares`, and `CPUPercent` last.
+        type: "integer"
+        format: "int64"
+      CpuPercent:
+        description: |
+          The usable percentage of the available CPUs (Windows only).
+
+          On Windows Server containers, the processor resource controls are
+          mutually exclusive. The order of precedence is `CPUCount` first, then
+          `CPUShares`, and `CPUPercent` last.
+        type: "integer"
+        format: "int64"
+      IOMaximumIOps:
+        description: "Maximum IOps for the container system drive (Windows only)"
+        type: "integer"
+        format: "int64"
+      IOMaximumBandwidth:
+        description: |
+          Maximum IO in bytes per second for the container system drive
+          (Windows only).
+        type: "integer"
+        format: "int64"
+
+  Limit:
+    description: |
+      An object describing a limit on resources which can be requested by a task.
+    type: "object"
+    properties:
+      NanoCPUs:
+        type: "integer"
+        format: "int64"
+        example: 4000000000
+      MemoryBytes:
+        type: "integer"
+        format: "int64"
+        example: 8272408576
+      Pids:
+        description: |
+          Limits the maximum number of PIDs in the container. Set `0` for unlimited.
+        type: "integer"
+        format: "int64"
+        default: 0
+        example: 100
+
+  ResourceObject:
+    description: |
+      An object describing the resources which can be advertised by a node and
+      requested by a task.
+ type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + + Health: + description: | + Health stores information about the container's healthcheck results. + type: "object" + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + x-nullable: true + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
+ type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. + + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:`. Any + other value is taken as a custom network's name to which this + container should connect to. 
+ PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `[:]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." 
+ PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. + + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: "A list of string values to customize labels for MLS + systems, such as SELinux." + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the usernamespace mode for the container when usernamespace + remapping option is enabled. + ShmSize: + type: "integer" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container. + For example: + + ``` + {"net.ipv4.ip_forward": "1"} + ``` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. (Windows only) + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + + ContainerConfig: + description: "Configuration for a container that is portable between hosts" + type: "object" + properties: + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Domainname: + description: "The domain name to use for the container." + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. 
+        type: "boolean"
+        default: false
+      OpenStdin:
+        description: "Open `stdin`"
+        type: "boolean"
+        default: false
+      StdinOnce:
+        description: "Close `stdin` after one attached client disconnects"
+        type: "boolean"
+        default: false
+      Env:
+        description: |
+          A list of environment variables to set inside the container in the
+          form `["VAR=value", ...]`. A variable without `=` is removed from the
+          environment, rather than having an empty value.
+        type: "array"
+        items:
+          type: "string"
+      Cmd:
+        description: |
+          Command to run specified as a string or an array of strings.
+        type: "array"
+        items:
+          type: "string"
+      Healthcheck:
+        $ref: "#/definitions/HealthConfig"
+      ArgsEscaped:
+        description: "Command is already escaped (Windows only)"
+        type: "boolean"
+      Image:
+        description: |
+          The name of the image to use when creating the container.
+        type: "string"
+      Volumes:
+        description: |
+          An object mapping mount point paths inside the container to empty
+          objects.
+        type: "object"
+        additionalProperties:
+          type: "object"
+          enum:
+            - {}
+          default: {}
+      WorkingDir:
+        description: "The working directory for commands to run in."
+        type: "string"
+      Entrypoint:
+        description: |
+          The entry point for the container as a string or an array of strings.
+
+          If the array consists of exactly one empty string (`[""]`) then the
+          entry point is reset to system default (i.e., the entry point used by
+          docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
+        type: "array"
+        items:
+          type: "string"
+      NetworkDisabled:
+        description: "Disable networking for the container."
+        type: "boolean"
+      MacAddress:
+        description: "MAC address of the container."
+        type: "string"
+      OnBuild:
+        description: |
+          `ONBUILD` metadata that were defined in the image's `Dockerfile`.
+        type: "array"
+        items:
+          type: "string"
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        additionalProperties:
+          type: "string"
+      StopSignal:
+        description: |
+          Signal to stop a container as a string or unsigned integer.
+        type: "string"
+        default: "SIGTERM"
+      StopTimeout:
+        description: "Timeout to stop a container in seconds."
+        type: "integer"
+        default: 10
+      Shell:
+        description: |
+          Shell for when `RUN`, `CMD`, and `ENTRYPOINT` use a shell.
+        type: "array"
+        items:
+          type: "string"
+
+  NetworkingConfig:
+    description: |
+      NetworkingConfig represents the container's networking configuration for
+      each of its interfaces.
+      It is used for the networking configs specified in the `docker create`
+      and `docker network connect` commands.
+    type: "object"
+    properties:
+      EndpointsConfig:
+        description: |
+          A mapping of network name to endpoint configuration for that network.
+        type: "object"
+        additionalProperties:
+          $ref: "#/definitions/EndpointSettings"
+    example:
+      # putting an example here, instead of using the example values from
+      # /definitions/EndpointSettings, because containers/create currently
+      # does not support attaching to multiple networks, so the example request
+      # would be confusing if it showed that multiple networks can be contained
+      # in the EndpointsConfig.
+ # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323)
+ EndpointsConfig:
+ isolated_nw:
+ IPAMConfig:
+ IPv4Address: "172.20.30.33"
+ IPv6Address: "2001:db8:abcd::3033"
+ LinkLocalIPs:
+ - "169.254.34.68"
+ - "fe80::3468"
+ Links:
+ - "container_1"
+ - "container_2"
+ Aliases:
+ - "server_x"
+ - "server_y"
+
+ NetworkSettings:
+ description: "NetworkSettings exposes the network settings in the API"
+ type: "object"
+ properties:
+ Bridge:
+ description: Name of the network's bridge (for example, `docker0`).
+ type: "string"
+ example: "docker0"
+ SandboxID:
+ description: SandboxID uniquely represents a container's network stack.
+ type: "string"
+ example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3"
+ HairpinMode:
+ description: |
+ Indicates if hairpin NAT should be enabled on the virtual interface.
+ type: "boolean"
+ example: false
+ LinkLocalIPv6Address:
+ description: IPv6 unicast address using the link-local prefix.
+ type: "string"
+ example: "fe80::42:acff:fe11:1"
+ LinkLocalIPv6PrefixLen:
+ description: Prefix length of the IPv6 unicast address.
+ type: "integer"
+ example: "64"
+ Ports:
+ $ref: "#/definitions/PortMap"
+ SandboxKey:
+ description: SandboxKey identifies the sandbox.
+ type: "string"
+ example: "/var/run/docker/netns/8ab54b426c38"
+
+ # TODO is SecondaryIPAddresses actually used?
+ SecondaryIPAddresses:
+ description: ""
+ type: "array"
+ items:
+ $ref: "#/definitions/Address"
+ x-nullable: true
+
+ # TODO is SecondaryIPv6Addresses actually used?
+ SecondaryIPv6Addresses:
+ description: ""
+ type: "array"
+ items:
+ $ref: "#/definitions/Address"
+ x-nullable: true
+
+ # TODO properties below are part of DefaultNetworkSettings, which is
+ # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12
+ EndpointID:
+ description: |
+ EndpointID uniquely represents a service endpoint in a Sandbox.
+
+
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + +
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "02:42:ac:11:00:04"
+ Networks:
+ description: |
+ Information about all networks that the container is connected to.
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+
+ Address:
+ description: Address represents an IPv4 or IPv6 IP address.
+ type: "object"
+ properties:
+ Addr:
+ description: IP address.
+ type: "string"
+ PrefixLen:
+ description: Mask length of the IP address.
+ type: "integer"
+
+ PortMap:
+ description: |
+ PortMap describes the mapping of container ports to host ports, using the
+ container's port-number and protocol as key in the format `<port>/<protocol>`,
+ for example, `80/udp`.
+
+ If a container's port is mapped for multiple protocols, separate entries
+ are added to the mapping table.
+ type: "object"
+ additionalProperties:
+ type: "array"
+ x-nullable: true
+ items:
+ $ref: "#/definitions/PortBinding"
+ example:
+ "443/tcp":
+ - HostIp: "127.0.0.1"
+ HostPort: "4443"
+ "80/tcp":
+ - HostIp: "0.0.0.0"
+ HostPort: "80"
+ - HostIp: "0.0.0.0"
+ HostPort: "8080"
+ "80/udp":
+ - HostIp: "0.0.0.0"
+ HostPort: "80"
+ "53/udp":
+ - HostIp: "0.0.0.0"
+ HostPort: "53"
+ "2377/tcp": null
+
+ PortBinding:
+ description: |
+ PortBinding represents a binding between a host IP address and a host
+ port.
+ type: "object"
+ properties:
+ HostIp:
+ description: "Host IP address that the container's port is mapped to."
+ type: "string"
+ example: "127.0.0.1"
+ HostPort:
+ description: "Host port number that the container's port is mapped to."
+ type: "string"
+ example: "4443"
+
+ GraphDriverData:
+ description: "Information about a container's graph driver."
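+ # Illustrative aside, not part of the upstream schema: a PortMap as it
+ # appears under `HostConfig.PortBindings` in `POST /containers/create`,
+ # binding container port 80/tcp to host port 8080 (values hypothetical):
+ #
+ #   "PortBindings": {
+ #     "80/tcp": [ { "HostIp": "0.0.0.0", "HostPort": "8080" } ]
+ #   }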
+ type: "object" + required: [Name, Data] + properties: + Name: + type: "string" + x-nullable: false + Data: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + + Image: + type: "object" + required: + - Id + - Parent + - Comment + - Created + - Container + - DockerVersion + - Author + - Architecture + - Os + - Size + - VirtualSize + - GraphDriver + - RootFS + properties: + Id: + type: "string" + x-nullable: false + RepoTags: + type: "array" + items: + type: "string" + RepoDigests: + type: "array" + items: + type: "string" + Parent: + type: "string" + x-nullable: false + Comment: + type: "string" + x-nullable: false + Created: + type: "string" + x-nullable: false + Container: + type: "string" + x-nullable: false + ContainerConfig: + $ref: "#/definitions/ContainerConfig" + DockerVersion: + type: "string" + x-nullable: false + Author: + type: "string" + x-nullable: false + Config: + $ref: "#/definitions/ContainerConfig" + Architecture: + type: "string" + x-nullable: false + Os: + type: "string" + x-nullable: false + OsVersion: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + VirtualSize: + type: "integer" + format: "int64" + x-nullable: false + GraphDriver: + $ref: "#/definitions/GraphDriverData" + RootFS: + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + Layers: + type: "array" + items: + type: "string" + BaseLayer: + type: "string" + Metadata: + type: "object" + properties: + LastTagTime: + type: "string" + format: "dateTime" + + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + type: "string" + x-nullable: false + ParentId: + type: "string" + x-nullable: false + RepoTags: + type: "array" + x-nullable: false + items: + type: "string" + RepoDigests: + type: "array" + x-nullable: false + items: + type: "string" + Created: + type: "integer" + x-nullable: false + Size: + type: "integer" + x-nullable: false + SharedSize: + type: "integer" + x-nullable: false + VirtualSize: + type: "integer" + x-nullable: false + Labels: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + Containers: + x-nullable: false + type: "integer" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. 
+
+ The `Status` field is optional, and is omitted if the volume driver
+ does not support this feature.
+ additionalProperties:
+ type: "object"
+ Labels:
+ type: "object"
+ description: "User-defined key/value metadata."
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ Scope:
+ type: "string"
+ description: |
+ The level at which the volume exists. Either `global` for cluster-wide,
+ or `local` for machine level.
+ default: "local"
+ x-nullable: false
+ enum: ["local", "global"]
+ Options:
+ type: "object"
+ description: |
+ The driver-specific options used when creating the volume.
+ additionalProperties:
+ type: "string"
+ UsageData:
+ type: "object"
+ x-nullable: true
+ required: [Size, RefCount]
+ description: |
+ Usage details about the volume. This information is used by the
+ `GET /system/df` endpoint, and omitted in other endpoints.
+ properties:
+ Size:
+ type: "integer"
+ default: -1
+ description: |
+ Amount of disk space used by the volume (in bytes). This information
+ is only available for volumes created with the `"local"` volume
+ driver. For volumes created with other volume drivers, this field
+ is set to `-1` ("not available").
+ x-nullable: false
+ RefCount:
+ type: "integer"
+ default: -1
+ description: |
+ The number of containers referencing this volume. This field
+ is set to `-1` if the reference-count is not available.
+ x-nullable: false
+
+ example:
+ Name: "tardis"
+ Driver: "custom"
+ Mountpoint: "/var/lib/docker/volumes/tardis"
+ Status:
+ hello: "world"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Scope: "local"
+ CreatedAt: "2016-06-07T20:31:11.853781916Z"
+
+ Network:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Id:
+ type: "string"
+ Created:
+ type: "string"
+ format: "dateTime"
+ Scope:
+ type: "string"
+ Driver:
+ type: "string"
+ EnableIPv6:
+ type: "boolean"
+ IPAM:
+ $ref: "#/definitions/IPAM"
+ Internal:
+ type: "boolean"
+ Attachable:
+ type: "boolean"
+ Ingress:
+ type: "boolean"
+ Containers:
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/NetworkContainer"
+ Options:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Labels:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ Name: "net01"
+ Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
+ Created: "2016-10-19T04:33:30.360899459Z"
+ Scope: "local"
+ Driver: "bridge"
+ EnableIPv6: false
+ IPAM:
+ Driver: "default"
+ Config:
+ - Subnet: "172.19.0.0/16"
+ Gateway: "172.19.0.1"
+ Options:
+ foo: "bar"
+ Internal: false
+ Attachable: false
+ Ingress: false
+ Containers:
+ 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
+ Name: "test"
+ EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
+ MacAddress: "02:42:ac:13:00:02"
+ IPv4Address: "172.19.0.2/16"
+ IPv6Address: ""
+ Options:
+ com.docker.network.bridge.default_bridge: "true"
+ com.docker.network.bridge.enable_icc: "true"
+ com.docker.network.bridge.enable_ip_masquerade: "true"
+ com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+ com.docker.network.bridge.name: "docker0"
+ com.docker.network.driver.mtu: "1500"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ IPAM:
+ type: "object"
+ properties:
+ Driver:
+ description: "Name of the IPAM driver to use."
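+ # Illustrative aside, not part of the upstream schema: a minimal body for
+ # `POST /networks/create` using the IPAM shape defined below (values taken
+ # from the Network example above):
+ #
+ #   { "Name": "net01", "Driver": "bridge",
+ #     "IPAM": { "Driver": "default",
+ #               "Config": [ { "Subnet": "172.19.0.0/16",
+ #                             "Gateway": "172.19.0.1" } ] } }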
+ type: "string"
+ default: "default"
+ Config:
+ description: |
+ List of IPAM configuration options, specified as a map:
+
+ ```
+ {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}
+ ```
+ type: "array"
+ items:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Options:
+ description: "Driver-specific options, specified as a map."
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ NetworkContainer:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ EndpointID:
+ type: "string"
+ MacAddress:
+ type: "string"
+ IPv4Address:
+ type: "string"
+ IPv6Address:
+ type: "string"
+
+ BuildInfo:
+ type: "object"
+ properties:
+ id:
+ type: "string"
+ stream:
+ type: "string"
+ error:
+ type: "string"
+ errorDetail:
+ $ref: "#/definitions/ErrorDetail"
+ status:
+ type: "string"
+ progress:
+ type: "string"
+ progressDetail:
+ $ref: "#/definitions/ProgressDetail"
+ aux:
+ $ref: "#/definitions/ImageID"
+
+ BuildCache:
+ type: "object"
+ description: |
+ BuildCache contains information about a build cache record.
+ properties:
+ ID:
+ type: "string"
+ description: |
+ Unique ID of the build cache record.
+ example: "ndlpt0hhvkqcdfkputsk4cq9c"
+ Parent:
+ description: |
+ ID of the parent build cache record.
+ type: "string"
+ example: "hw53o5aio51xtltp5xjp8v7fx"
+ Type:
+ type: "string"
+ description: |
+ Cache record type.
+ example: "regular"
+ # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84
+ enum:
+ - "internal"
+ - "frontend"
+ - "source.local"
+ - "source.git.checkout"
+ - "exec.cachemount"
+ - "regular"
+ Description:
+ type: "string"
+ description: |
+ Description of the build-step that produced the build cache.
+ example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
+ InUse:
+ type: "boolean"
+ description: |
+ Indicates if the build cache is in use.
+ example: false
+ Shared:
+ type: "boolean"
+ description: |
+ Indicates if the build cache is shared.
+ example: true
+ Size:
+ description: |
+ Amount of disk space used by the build cache (in bytes).
+ type: "integer"
+ example: 51
+ CreatedAt:
+ description: |
+ Date and time at which the build cache was created in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2016-08-18T10:44:24.496525531Z"
+ LastUsedAt:
+ description: |
+ Date and time at which the build cache was last used in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + MacAddress: + description: | + MAC address for the endpoint on this network. + type: "string" + example: "02:42:ac:11:00:04" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. 
+ type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. + type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." 
+ type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." + enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. 
In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." 
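+ # Illustrative aside, not part of the upstream schema: ObjectVersion drives
+ # optimistic concurrency. For example, a node update echoes back the last
+ # read `Version.Index` as the `version` query parameter:
+ #
+ #   POST /nodes/24ifsmvkjbyhk/update?version=373531
+ #   { "Name": "my-node", "Role": "manager", "Availability": "active" }
+ #
+ # If another write has bumped the version in the meantime, the update is
+ # rejected instead of silently overwriting the earlier change.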
+ type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "logentries" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. + type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." 
+ type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: | + The delay for an agent to send a heartbeat to the dispatcher. + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: | + Configuration for forwarding signing requests to an external + certificate authority. + type: "array" + items: + type: "object" + properties: + Protocol: + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: | + URL where certificate signing requests should be sent. + type: "string" + Options: + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. + type: "object" + additionalProperties: + type: "string" + CACert: + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be to the current swarm + root CA certificate if not provided). + type: "string" + SigningCACert: + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. 
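+ # Illustrative aside, not part of the upstream schema: a SwarmSpec fragment
+ # as it might be sent to `POST /swarm/update` (values hypothetical; the
+ # required `version` query parameter comes from a prior `GET /swarm`):
+ #
+ #   { "Name": "default",
+ #     "Orchestration": { "TaskHistoryRetentionLimit": 10 },
+ #     "Raft": { "SnapshotInterval": 10000, "ElectionTick": 3,
+ #               "HeartbeatTick": 1 },
+ #     "CAConfig": { "NodeCertExpiry": 7776000000000000 } }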
+ type: "string"
+ SigningCAKey:
+ description: |
+ The desired signing CA key for all swarm node TLS leaf certificates,
+ in PEM format.
+ type: "string"
+ ForceRotate:
+ description: |
+ An integer whose purpose is to force swarm to generate a new
+ signing CA certificate and key, if none have been specified in
+ `SigningCACert` and `SigningCAKey`.
+ format: "uint64"
+ type: "integer"
+ EncryptionConfig:
+ description: "Parameters related to encryption-at-rest."
+ type: "object"
+ properties:
+ AutoLockManagers:
+ description: |
+ If set, generate a key and use it to lock data stored on the
+ managers.
+ type: "boolean"
+ example: false
+ TaskDefaults:
+ description: "Defaults for creating tasks in this cluster."
+ type: "object"
+ properties:
+ LogDriver:
+ description: |
+ The log driver to use for tasks created in the orchestrator if
+ unspecified by a service.
+
+ Updating this value only affects new tasks. Existing tasks continue
+ to use their previously configured log driver until recreated.
+ type: "object"
+ properties:
+ Name:
+ description: |
+ The log driver to use as a default for new tasks.
+ type: "string"
+ example: "json-file"
+ Options:
+ description: |
+ Driver-specific options for the selected log driver, specified
+ as key/value pairs.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ "max-file": "10"
+ "max-size": "100m"
+
+ # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but
+ # without `JoinTokens`.
+ ClusterInfo:
+ description: |
+ ClusterInfo represents information about the swarm as is returned by the
+ "/info" endpoint. Join-tokens are not included.
+ x-nullable: true
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the swarm."
+ type: "string"
+ example: "abajmipo7b4xz5ip2nrla6b11"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ description: |
+ Date and time at which the swarm was initialised in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2016-08-18T10:44:24.496525531Z"
+ UpdatedAt:
+ description: |
+ Date and time at which the swarm was last updated in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2017-08-09T07:09:37.632105588Z"
+ Spec:
+ $ref: "#/definitions/SwarmSpec"
+ TLSInfo:
+ $ref: "#/definitions/TLSInfo"
+ RootRotationInProgress:
+ description: |
+ Whether there is currently a root CA rotation in progress for the swarm.
+ type: "boolean"
+ example: false
+ DataPathPort:
+ description: |
+ DataPathPort specifies the data path port number for data traffic.
+ Acceptable port range is 1024 to 49151.
+ If no port is set or is set to 0, the default port (4789) is used.
+ type: "integer"
+ format: "uint32"
+ default: 4789
+ example: 4789
+ DefaultAddrPool:
+ description: |
+ Default Address Pool specifies default subnet pools for global scope
+ networks.
+ type: "array"
+ items:
+ type: "string"
+ format: "CIDR"
+ example: ["10.10.0.0/16", "20.20.0.0/16"]
+ SubnetSize:
+ description: |
+ SubnetSize specifies the subnet size of the networks created from the
+ default subnet pool.
+ type: "integer"
+ format: "uint32"
+ maximum: 29
+ default: 24
+ example: 24
+
+ JoinTokens:
+ description: |
+ JoinTokens contains the tokens workers and managers need to join the swarm.
+ type: "object"
+ properties:
+ Worker:
+ description: |
+ The token workers can use to join the swarm.
+ type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + description: | + Describes a permission accepted by the user upon installing the + plugin. + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +
+ + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. The + secret in the reference will be identified by its ID. + type: "string" + Configs: + description: | + Configs contains references to zero or more configs that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + +
+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the + container but is used by the task + +
+
+ > **Note**: `Configs.File` and `Configs.Runtime` are mutually
+ > exclusive
+ type: "object"
+ ConfigID:
+ description: |
+ ConfigID represents the ID of the specific config that we're
+ referencing.
+ type: "string"
+ ConfigName:
+ description: |
+ ConfigName is the name of the config that this references,
+ but this is just provided for lookup/display purposes. The
+ config in the reference will be identified by its ID.
+ type: "string"
+ Isolation:
+ type: "string"
+ description: |
+ Isolation technology of the containers running the service.
+ (Windows only)
+ enum:
+ - "default"
+ - "process"
+ - "hyperv"
+ Init:
+ description: |
+ Run an init inside the container that forwards signals and reaps
+ processes. This field is omitted if empty, and the default (as
+ configured on the daemon) is used.
+ type: "boolean"
+ x-nullable: true
+ Sysctls:
+ description: |
+ Set kernel namespaced parameters (sysctls) in the container.
+ The Sysctls option on services accepts the same sysctls as are
+ supported on containers. Note that while the same sysctls are
+ supported, no guarantees or checks are made about their
+ suitability for a clustered environment, and it's up to the user
+ to determine whether a given sysctl will work properly in a
+ Service.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ # This option is not used by Windows containers
+ CapabilityAdd:
+ type: "array"
+ description: |
+ A list of kernel capabilities to add to the default set
+ for the container.
+ items:
+ type: "string"
+ example:
+ - "CAP_NET_RAW"
+ - "CAP_SYS_ADMIN"
+ - "CAP_SYS_CHROOT"
+ - "CAP_SYSLOG"
+ CapabilityDrop:
+ type: "array"
+ description: |
+ A list of kernel capabilities to drop from the default set
+ for the container.
+ items:
+ type: "string"
+ example:
+ - "CAP_NET_RAW"
+ Ulimits:
+ description: |
+ A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Name:
+ description: "Name of ulimit"
+ type: "string"
+ Soft:
+ description: "Soft limit"
+ type: "integer"
+ Hard:
+ description: "Hard limit"
+ type: "integer"
+ NetworkAttachmentSpec:
+ description: |
+ Read-only spec type for non-swarm containers attached to swarm overlay
+ networks.
+
+
+
+ > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
+ > mutually exclusive. PluginSpec is only used when the Runtime field
+ > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+ > field is set to `attachment`.
+ type: "object"
+ properties:
+ ContainerID:
+ description: "ID of the container represented by this task"
+ type: "string"
+ Resources:
+ description: |
+ Resource requirements which apply to each individual container created
+ as part of the service.
+ type: "object"
+ properties:
+ Limits:
+ description: "Define resource limits."
+ $ref: "#/definitions/Limit"
+ Reservations:
+ description: "Define resource reservations."
+ $ref: "#/definitions/ResourceObject"
+ RestartPolicy:
+ description: |
+ Specification for the restart policy which applies to containers
+ created as part of this service.
+ type: "object"
+ properties:
+ Condition:
+ description: "Condition for restart."
+ type: "string"
+ enum:
+ - "none"
+ - "on-failure"
+ - "any"
+ Delay:
+ description: "Delay between restart attempts."
+ type: "integer"
+ format: "int64"
+ MaxAttempts:
+ description: |
+ Maximum attempts to restart a given container before giving up
+ (default value is 0, which is ignored).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Window:
+ description: |
+ Window is the time window used to evaluate the restart policy
+ (default value is 0, which is unbounded).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Placement:
+ type: "object"
+ properties:
+ Constraints:
+ description: |
+ An array of constraint expressions to limit the set of nodes where
+ a task can be scheduled. Constraint expressions can either use a
+ _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
+ nodes that satisfy every expression (AND match). Constraints can
+ match node or Docker Engine labels as follows:
+
+ node attribute | matches | example
+ ---------------------|--------------------------------|-----------------------------------------------
+ `node.id` | Node ID | `node.id==2ivku8v2gvtg4`
+ `node.hostname` | Node hostname | `node.hostname!=node-2`
+ `node.role` | Node role (`manager`/`worker`) | `node.role==manager`
+ `node.platform.os` | Node operating system | `node.platform.os==windows`
+ `node.platform.arch` | Node architecture | `node.platform.arch==x86_64`
+ `node.labels` | User-defined node labels | `node.labels.security==high`
+ `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`
+
+ `engine.labels` apply to Docker Engine labels like operating system,
+ drivers, etc. Swarm administrators add `node.labels` for operational
+ purposes by using the [`node update endpoint`](#operation/NodeUpdate).
+
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "node.hostname!=node3.corp.example.com"
+ - "node.role!=manager"
+ - "node.labels.type==production"
+ - "node.platform.os==linux"
+ - "node.platform.arch==x86_64"
+ Preferences:
+ description: |
+ Preferences provide a way to make the scheduler aware of factors
+ such as topology. They are provided in order from highest to
+ lowest precedence.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Spread:
+ type: "object"
+ properties:
+ SpreadDescriptor:
+ description: |
+ label descriptor, such as `engine.labels.az`.
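+ # Illustrative aside, not part of the upstream schema: a Placement object
+ # combining constraints with a spread preference (values taken from the
+ # examples above):
+ #
+ #   "Placement": {
+ #     "Constraints": [ "node.role!=manager", "node.platform.os==linux" ],
+ #     "Preferences": [
+ #       { "Spread": { "SpreadDescriptor": "node.labels.datacenter" } }
+ #     ]
+ #   }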
+ type: "string"
+ example:
+ - Spread:
+ SpreadDescriptor: "node.labels.datacenter"
+ - Spread:
+ SpreadDescriptor: "node.labels.rack"
+ MaxReplicas:
+ description: |
+ Maximum number of replicas per node (default value is 0, which
+ is unlimited).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Platforms:
+ description: |
+ Platforms stores all the platforms that the service's image can
+ run on. This field is used in the platform filter for scheduling.
+ If empty, then the platform filter is off, meaning there are no
+ scheduling restrictions.
+ type: "array"
+ items:
+ $ref: "#/definitions/Platform"
+ ForceUpdate:
+ description: |
+ A counter that triggers an update even if no relevant parameters have
+ been changed.
+ type: "integer"
+ Runtime:
+ description: |
+ Runtime is the type of runtime specified for the task executor.
+ type: "string"
+ Networks:
+ description: "Specifies which networks the service should attach to."
+ type: "array"
+ items:
+ $ref: "#/definitions/NetworkAttachmentConfig"
+ LogDriver:
+ description: |
+ Specifies the log driver to use for tasks created from this spec. If
+ not present, the default one for the swarm will be used, finally
+ falling back to the engine default if not specified.
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Options:
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ TaskState:
+ type: "string"
+ enum:
+ - "new"
+ - "allocated"
+ - "pending"
+ - "assigned"
+ - "accepted"
+ - "preparing"
+ - "ready"
+ - "starting"
+ - "running"
+ - "complete"
+ - "shutdown"
+ - "failed"
+ - "rejected"
+ - "remove"
+ - "orphaned"
+
+ Task:
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the task."
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Name:
+ description: "Name of the task."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Spec:
+ $ref: "#/definitions/TaskSpec"
+ ServiceID:
+ description: "The ID of the service this task is part of."
+ type: "string"
+ Slot:
+ type: "integer"
+ NodeID:
+ description: "The ID of the node that this task is on."
+ type: "string"
+ AssignedGenericResources:
+ $ref: "#/definitions/GenericResources"
+ Status:
+ type: "object"
+ properties:
+ Timestamp:
+ type: "string"
+ format: "dateTime"
+ State:
+ $ref: "#/definitions/TaskState"
+ Message:
+ type: "string"
+ Err:
+ type: "string"
+ ContainerStatus:
+ type: "object"
+ properties:
+ ContainerID:
+ type: "string"
+ PID:
+ type: "integer"
+ ExitCode:
+ type: "integer"
+ DesiredState:
+ $ref: "#/definitions/TaskState"
+ JobIteration:
+ description: |
+ If the Service this Task belongs to is a job-mode service, contains
+ the JobIteration of the Service this Task was created for. Absent if
+ the Task was created for a Replicated or Global Service.
+ $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an updated task fails to run, or stops running + during the update. + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: | + Amount of time to monitor each updated task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during an update before the + failure action is invoked, specified as a floating point number + between 0 and 1. 
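+ # Illustrative aside, not part of the upstream schema: a minimal ServiceSpec
+ # for `POST /services/create` with a rolling-update policy (values
+ # hypothetical):
+ #
+ #   { "Name": "web",
+ #     "TaskTemplate": { "ContainerSpec": { "Image": "redis" } },
+ #     "Mode": { "Replicated": { "Replicas": 3 } },
+ #     "UpdateConfig": { "Parallelism": 1, "Delay": 10000000000,
+ #                       "FailureAction": "rollback", "Order": "start-first" } }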
+            type: "number"
+            default: 0
+          Order:
+            description: |
+              The order of operations when rolling out an updated task. Either
+              the old task is shut down before the new task is started, or the
+              new task is started before the old task is shut down.
+            type: "string"
+            enum:
+              - "stop-first"
+              - "start-first"
+      RollbackConfig:
+        description: "Specification for the rollback strategy of the service."
+        type: "object"
+        properties:
+          Parallelism:
+            description: |
+              Maximum number of tasks to be rolled back in one iteration (0 means
+              unlimited parallelism).
+            type: "integer"
+            format: "int64"
+          Delay:
+            description: |
+              Amount of time between rollback iterations, in nanoseconds.
+            type: "integer"
+            format: "int64"
+          FailureAction:
+            description: |
+              Action to take if a rolled back task fails to run, or stops
+              running during the rollback.
+            type: "string"
+            enum:
+              - "continue"
+              - "pause"
+          Monitor:
+            description: |
+              Amount of time to monitor each rolled back task for failures, in
+              nanoseconds.
+            type: "integer"
+            format: "int64"
+          MaxFailureRatio:
+            description: |
+              The fraction of tasks that may fail during a rollback before the
+              failure action is invoked, specified as a floating point number
+              between 0 and 1.
+            type: "number"
+            default: 0
+          Order:
+            description: |
+              The order of operations when rolling back a task. Either the old
+              task is shut down before the new task is started, or the new task
+              is started before the old task is shut down.
+            type: "string"
+            enum:
+              - "stop-first"
+              - "start-first"
+      Networks:
+        description: "Specifies which networks the service should attach to."
+        type: "array"
+        items:
+          $ref: "#/definitions/NetworkAttachmentConfig"
+
+      EndpointSpec:
+        $ref: "#/definitions/EndpointSpec"
+
+  EndpointPortConfig:
+    type: "object"
+    properties:
+      Name:
+        type: "string"
+      Protocol:
+        type: "string"
+        enum:
+          - "tcp"
+          - "udp"
+          - "sctp"
+      TargetPort:
+        description: "The port inside the container."
+        type: "integer"
+      PublishedPort:
+        description: "The port on the swarm hosts."
+        type: "integer"
+      PublishMode:
+        description: |
+          The mode in which the port is published.
+
+          - "ingress" makes the target port accessible on every node,
+            regardless of whether there is a task for the service running on
+            that node or not.
+          - "host" bypasses the routing mesh and publishes the port directly on
+            the swarm node where that service is running.
+
+        type: "string"
+        enum:
+          - "ingress"
+          - "host"
+        default: "ingress"
+        example: "ingress"
+
+  EndpointSpec:
+    description: "Properties that can be configured to access and load balance a service."
+    type: "object"
+    properties:
+      Mode:
+        description: |
+          The mode of resolution to use for internal load balancing between tasks.
+        type: "string"
+        enum:
+          - "vip"
+          - "dnsrr"
+        default: "vip"
+      Ports:
+        description: |
+          List of exposed ports that this service is accessible on from the
+          outside. Ports can only be provided if `vip` resolution mode is used.
+        type: "array"
+        items:
+          $ref: "#/definitions/EndpointPortConfig"
+
+  Service:
+    type: "object"
+    properties:
+      ID:
+        type: "string"
+      Version:
+        $ref: "#/definitions/ObjectVersion"
+      CreatedAt:
+        type: "string"
+        format: "dateTime"
+      UpdatedAt:
+        type: "string"
+        format: "dateTime"
+      Spec:
+        $ref: "#/definitions/ServiceSpec"
+      Endpoint:
+        type: "object"
+        properties:
+          Spec:
+            $ref: "#/definitions/EndpointSpec"
+          Ports:
+            type: "array"
+            items:
+              $ref: "#/definitions/EndpointPortConfig"
+          VirtualIPs:
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                NetworkID:
+                  type: "string"
+                Addr:
+                  type: "string"
+      UpdateStatus:
+        description: "The status of a service update."
+        type: "object"
+        properties:
+          State:
+            type: "string"
+            enum:
+              - "updating"
+              - "paused"
+              - "completed"
+          StartedAt:
+            type: "string"
+            format: "dateTime"
+          CompletedAt:
+            type: "string"
+            format: "dateTime"
+          Message:
+            type: "string"
+      ServiceStatus:
+        description: |
+          The status of the service's tasks. Provided only when requested as
+          part of a ServiceList operation.
+        type: "object"
+        properties:
+          RunningTasks:
+            description: |
+              The number of tasks for the service currently in the Running state.
+            type: "integer"
+            format: "uint64"
+            example: 7
+          DesiredTasks:
+            description: |
+              The number of tasks for the service desired to be running.
+              For replicated services, this is the replica count from the
+              service spec. For global services, this is computed by taking
+              count of all tasks for the service with a Desired State other
+              than Shutdown.
+            type: "integer"
+            format: "uint64"
+            example: 10
+          CompletedTasks:
+            description: |
+              The number of tasks for a job that are in the Completed state.
+              This field must be cross-referenced with the service type, as the
+              value of 0 may mean the service is not in a job mode, or it may
+              mean the job-mode service has no tasks yet Completed.
+            type: "integer"
+            format: "uint64"
+      JobStatus:
+        description: |
+          The status of the service when it is in one of ReplicatedJob or
+          GlobalJob modes. Absent on Replicated and Global mode services. The
+          JobIteration is an ObjectVersion, but unlike the Service's version,
+          does not need to be sent with an update request.
+        type: "object"
+        properties:
+          JobIteration:
+            description: |
+              JobIteration is a value increased each time a Job is executed,
+              successfully or otherwise. "Executed", in this case, means the
+              job as a whole has been started, not that an individual Task has
+              been launched. A job is "Executed" when its ServiceSpec is
+              updated. JobIteration can be used to disambiguate Tasks belonging
+              to different executions of a job. Though JobIteration will
+              increase with each subsequent execution, it may not necessarily
+              increase by 1, and so JobIteration should not be used to
+              infer the total number of job executions.
+            $ref: "#/definitions/ObjectVersion"
+          LastExecution:
+            description: |
+              The last time, as observed by the server, that this job was
+              started.
+            type: "string"
+            format: "dateTime"
+    example:
+      ID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+      Version:
+        Index: 19
+      CreatedAt: "2016-06-07T21:05:51.880065305Z"
+      UpdatedAt: "2016-06-07T21:07:29.962229872Z"
+      Spec:
+        Name: "hopeful_cori"
+        TaskTemplate:
+          ContainerSpec:
+            Image: "redis"
+          Resources:
+            Limits: {}
+            Reservations: {}
+          RestartPolicy:
+            Condition: "any"
+            MaxAttempts: 0
+          Placement: {}
+          ForceUpdate: 0
+        Mode:
+          Replicated:
+            Replicas: 1
+        UpdateConfig:
+          Parallelism: 1
+          Delay: 1000000000
+          FailureAction: "pause"
+          Monitor: 15000000000
+          MaxFailureRatio: 0.15
+        RollbackConfig:
+          Parallelism: 1
+          Delay: 1000000000
+          FailureAction: "pause"
+          Monitor: 15000000000
+          MaxFailureRatio: 0.15
+        EndpointSpec:
+          Mode: "vip"
+          Ports:
+            -
+              Protocol: "tcp"
+              TargetPort: 6379
+              PublishedPort: 30001
+      Endpoint:
+        Spec:
+          Mode: "vip"
+          Ports:
+            -
+              Protocol: "tcp"
+              TargetPort: 6379
+              PublishedPort: 30001
+        Ports:
+          -
+            Protocol: "tcp"
+            TargetPort: 6379
+            PublishedPort: 30001
+        VirtualIPs:
+          -
+            NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+            Addr: "10.255.0.2/16"
+          -
+            NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+            Addr: "10.255.0.3/16"
+
+  ImageDeleteResponseItem:
+    type: "object"
+    properties:
+      Untagged:
+        description: "The image ID of an image that was untagged"
+        type: "string"
+      Deleted:
+        description: "The image ID of an image that was deleted"
+        type: "string"
+
+  ServiceUpdateResponse:
+    type: "object"
+    properties:
+      Warnings:
+        description: "Optional warning messages"
+        type: "array"
+        items:
+          type: "string"
+    example:
+      Warnings:
+        - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+
+  ContainerSummary:
+    type: "array"
+    items:
+      type: "object"
+      properties:
+        Id:
+          description: "The ID of this container"
+          type: "string"
+          x-go-name: "ID"
+        Names:
+          description: "The names that this container has been given"
+          type: "array"
+          items:
+            type: "string"
+        Image:
+          description: "The name of the image used when creating this container"
+          type: "string"
+        ImageID:
+          description: "The ID of the image that this container was created from"
+          type: "string"
+        Command:
+          description: "Command to run when starting the container"
+          type: "string"
+        Created:
+          description: "When the container was created"
+          type: "integer"
+          format: "int64"
+        Ports:
+          description: "The ports exposed by this container"
+          type: "array"
+          items:
+            $ref: "#/definitions/Port"
+        SizeRw:
+          description: "The size of files that have been created or changed by this container"
+          type: "integer"
+          format: "int64"
+        SizeRootFs:
+          description: "The total size of all the files in this container"
+          type: "integer"
+          format: "int64"
+        Labels:
+          description: "User-defined key/value metadata."
+          type: "object"
+          additionalProperties:
+            type: "string"
+        State:
+          description: "The state of this container (e.g. `Exited`)"
+          type: "string"
+        Status:
+          description: "Additional human-readable status of this container (e.g.
`Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/Mount" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) + data to store as secret. + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) + config data. + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". 
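The `Data` field of the `SecretSpec` and `ConfigSpec` definitions above is base64-url-safe encoded per RFC 4648 §5, which corresponds to Go's `base64.URLEncoding`. A minimal sketch of building such a payload, trimmed to the fields shown above (the struct is illustrative, not a type from this repository):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// secretSpec mirrors a subset of the SecretSpec definition above.
type secretSpec struct {
	Name   string            `json:"Name"`
	Labels map[string]string `json:"Labels,omitempty"`
	Data   string            `json:"Data"` // base64-url-safe (RFC 4648 section 5)
}

func main() {
	plaintext := []byte("VERY SECRET")

	spec := secretSpec{
		Name:   "app-key.crt",
		Labels: map[string]string{"com.example.some-label": "some-value"},
		// Go's URLEncoding implements the RFC 4648 section 5 alphabet.
		Data: base64.URLEncoding.EncodeToString(plaintext),
	}

	body, _ := json.Marshal(spec)
	fmt.Println(string(body)) // body would be POSTed to the daemon's secret-create endpoint
}
```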
+ type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether this container has been killed because it ran out of memory. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." + type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + x-nullable: true + $ref: "#/definitions/Health" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "19.03.12" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "19.03.12" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.40" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.12" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.13.14" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + The architecture that the daemon is running on + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. 
+ + This field is omitted when empty. + type: "string" + example: "4.19.76-linuxkit" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +
+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemory: + description: | + Indicates if the host has kernel memory limit support enabled. + +
+          > **Deprecated**: This field is deprecated, as kernel 5.4 deprecated
+          > `kmem.limit_in_bytes`.
+        type: "boolean"
+        example: true
+      CpuCfsPeriod:
+        description: |
+          Indicates if CPU CFS (Completely Fair Scheduler) period is supported by
+          the host.
+        type: "boolean"
+        example: true
+      CpuCfsQuota:
+        description: |
+          Indicates if CPU CFS (Completely Fair Scheduler) quota is supported by
+          the host.
+        type: "boolean"
+        example: true
+      CPUShares:
+        description: |
+          Indicates if CPU Shares limiting is supported by the host.
+        type: "boolean"
+        example: true
+      CPUSet:
+        description: |
+          Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host.
+
+          See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)
+        type: "boolean"
+        example: true
+      PidsLimit:
+        description: "Indicates if the host kernel has PID limit support enabled."
+        type: "boolean"
+        example: true
+      OomKillDisable:
+        description: "Indicates if OOM killer disable is supported on the host."
+        type: "boolean"
+      IPv4Forwarding:
+        description: "Indicates if IPv4 forwarding is enabled."
+        type: "boolean"
+        example: true
+      BridgeNfIptables:
+        description: "Indicates if `bridge-nf-call-iptables` is available on the host."
+        type: "boolean"
+        example: true
+      BridgeNfIp6tables:
+        description: "Indicates if `bridge-nf-call-ip6tables` is available on the host."
+        type: "boolean"
+        example: true
+      Debug:
+        description: |
+          Indicates if the daemon is running in debug-mode / with debug-level
+          logging enabled.
+        type: "boolean"
+        example: true
+      NFd:
+        description: |
+          The total number of file descriptors in use by the daemon process.
+
+          This information is only returned if debug-mode is enabled.
+        type: "integer"
+        example: 64
+      NGoroutines:
+        description: |
+          The number of goroutines that currently exist.
+
+          This information is only returned if debug-mode is enabled.
+        type: "integer"
+        example: 174
+      SystemTime:
+        description: |
+          Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
+          format with nano-seconds.
+        type: "string"
+        example: "2017-08-08T20:28:29.06202363Z"
+      LoggingDriver:
+        description: |
+          The logging driver to use as a default for new containers.
+        type: "string"
+      CgroupDriver:
+        description: |
+          The driver to use for managing cgroups.
+        type: "string"
+        enum: ["cgroupfs", "systemd", "none"]
+        default: "cgroupfs"
+        example: "cgroupfs"
+      CgroupVersion:
+        description: |
+          The version of the cgroup.
+        type: "string"
+        enum: ["1", "2"]
+        default: "1"
+        example: "1"
+      NEventsListener:
+        description: "Number of event listeners subscribed."
+        type: "integer"
+        example: 30
+      KernelVersion:
+        description: |
+          Kernel version of the host.
+
+          On Linux, this information is obtained from `uname`. On Windows this
+          information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\
+          registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_.
+        type: "string"
+        example: "4.9.38-moby"
+      OperatingSystem:
+        description: |
+          Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS"
+          or "Windows Server 2016 Datacenter"
+        type: "string"
+        example: "Alpine Linux v3.5"
+      OSVersion:
+        description: |
+          Version of the host's operating system
+ + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "16.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +
+ + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + + > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) + > returns the Swarm version instead of the daemon version, for example + > `swarm/1.2.8`. + type: "string" + example: "17.06.0-ce" + ClusterStore: + description: | + URL of the distributed storage backend. + + + The storage backend is used for multihost networking (to store + network and endpoint information) and by the node discovery mechanism. + +
+ + > **Deprecated**: This field is only propagated when using standalone Swarm + > mode, and overlay networking using an external k/v store. Overlay + > networks with Swarm mode enabled use the built-in raft store, and + > this field will be empty. + type: "string" + example: "consul://consul.corp.example.com:8600/some/path" + ClusterAdvertise: + description: | + The network endpoint that the Engine advertises for the purpose of + node discovery. ClusterAdvertise is a `host:port` combination on which + the daemon is reachable by other hosts. + +
+          > **Deprecated**: This field is only propagated when using standalone Swarm
+          > mode, and overlay networking using an external k/v store. Overlay
+          > networks with Swarm mode enabled use the built-in raft store, and
+          > this field will be empty.
+        type: "string"
+        example: "node5.corp.example.com:8000"
+      Runtimes:
+        description: |
+          List of [OCI compliant](https://github.com/opencontainers/runtime-spec)
+          runtimes configured on the daemon. Keys hold the "name" used to
+          reference the runtime.
+
+          The Docker daemon relies on an OCI compliant runtime (invoked via the
+          `containerd` daemon) as its interface to the Linux kernel namespaces,
+          cgroups, and SELinux.
+
+          The default runtime is `runc`, and is automatically configured. Additional
+          runtimes can be configured by the user and will be listed here.
+        type: "object"
+        additionalProperties:
+          $ref: "#/definitions/Runtime"
+        default:
+          runc:
+            path: "runc"
+        example:
+          runc:
+            path: "runc"
+          runc-master:
+            path: "/go/bin/runc"
+          custom:
+            path: "/usr/local/bin/my-oci-runtime"
+            runtimeArgs: ["--debug", "--systemd-cgroup=false"]
+      DefaultRuntime:
+        description: |
+          Name of the default OCI runtime that is used when starting containers.
+
+          The default can be overridden per-container at create time.
+        type: "string"
+        default: "runc"
+        example: "runc"
+      Swarm:
+        $ref: "#/definitions/SwarmInfo"
+      LiveRestoreEnabled:
+        description: |
+          Indicates if live restore is enabled.
+
+          If enabled, containers are kept running when the daemon is shutdown
+          or upon daemon start if running containers are detected.
+        type: "boolean"
+        default: false
+        example: false
+      Isolation:
+        description: |
+          Represents the isolation technology to use as a default for containers.
+          The supported values are platform-specific.
+
+          If no isolation value is specified on daemon start, on Windows client,
+          the default is `hyperv`, and on Windows server, the default is `process`.
+
+          This option is currently not used on other platforms.
+        default: "default"
+        type: "string"
+        enum:
+          - "default"
+          - "hyperv"
+          - "process"
+      InitBinary:
+        description: |
+          Name and, optionally, path of the `docker-init` binary.
+
+          If the path is omitted, the daemon searches the host's `$PATH` for the
+          binary and uses the first result.
+        type: "string"
+        example: "docker-init"
+      ContainerdCommit:
+        $ref: "#/definitions/Commit"
+      RuncCommit:
+        $ref: "#/definitions/Commit"
+      InitCommit:
+        $ref: "#/definitions/Commit"
+      SecurityOptions:
+        description: |
+          List of security features that are enabled on the daemon, such as
+          apparmor, seccomp, SELinux, user-namespaces (userns), and rootless.
+
+          Additional configuration options for each security feature may
+          be present, and are included as a comma-separated list of key/value
+          pairs.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "name=apparmor"
+          - "name=seccomp,profile=default"
+          - "name=selinux"
+          - "name=userns"
+          - "name=rootless"
+      ProductLicense:
+        description: |
+          Reports a summary of the product license on the daemon.
+
+          If a commercial license has been applied to the daemon, information
+          such as number of nodes, and expiration are included.
+        type: "string"
+        example: "Community Engine"
+      DefaultAddressPools:
+        description: |
+          List of custom default address pools for local networks, which can be
+          specified in the daemon.json file or dockerd option.
+
+          Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256
+          10.10.[0-255].0/24 address pools.
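The Base/Size arithmetic described here can be spelled out in a few lines of Go. A minimal sketch, assuming IPv4 bases; `pools` is an illustrative helper, not part of the Engine API or this repository:

```go
package main

import (
	"fmt"
	"net/netip"
)

// pools enumerates the address pools a DefaultAddressPools entry would
// produce: every /size subnet contained in the base prefix.
func pools(base string, size int) ([]netip.Prefix, error) {
	p, err := netip.ParsePrefix(base)
	if err != nil {
		return nil, err
	}
	if !p.Addr().Is4() || size < p.Bits() || size > 32 {
		return nil, fmt.Errorf("size /%d is not inside %s", size, base)
	}
	n := 1 << (size - p.Bits())    // e.g. /16 base with /24 size -> 256 pools
	step := uint32(1) << (32 - size) // addresses per pool

	a4 := p.Masked().Addr().As4()
	start := uint32(a4[0])<<24 | uint32(a4[1])<<16 | uint32(a4[2])<<8 | uint32(a4[3])
	out := make([]netip.Prefix, 0, n)
	for i := 0; i < n; i++ {
		v := start + uint32(i)*step
		addr := netip.AddrFrom4([4]byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)})
		out = append(out, netip.PrefixFrom(addr, size))
	}
	return out, nil
}

func main() {
	ps, err := pools("10.10.0.0/16", 24)
	if err != nil {
		panic(err)
	}
	// Prints: 256 10.10.0.0/24 10.10.255.0/24, matching the documented example.
	fmt.Println(len(ps), ps[0], ps[len(ps)-1])
}
```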
+        type: "array"
+        items:
+          type: "object"
+          properties:
+            Base:
+              description: "The network address in CIDR format"
+              type: "string"
+              example: "10.10.0.0/16"
+            Size:
+              description: "The network pool size"
+              type: "integer"
+              example: 24
+      Warnings:
+        description: |
+          List of warnings / informational messages about missing features, or
+          issues related to the daemon configuration.
+
+          These messages can be printed by the client as information to the user.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "WARNING: No memory limit support"
+          - "WARNING: bridge-nf-call-iptables is disabled"
+          - "WARNING: bridge-nf-call-ip6tables is disabled"
+
+
+  # PluginsInfo is a temporary struct holding the plugin names registered
+  # with the docker daemon. It is used by the Info struct.
+  PluginsInfo:
+    description: |
+      Available plugins per type.
+
+      > **Note**: Only unmanaged (V1) plugins are included in this list.
+      > V1 plugins are "lazily" loaded, and are not returned in this list
+      > if there is no resource using the plugin.
+    type: "object"
+    properties:
+      Volume:
+        description: "Names of available volume-drivers, and volume-driver plugins."
+        type: "array"
+        items:
+          type: "string"
+        example: ["local"]
+      Network:
+        description: "Names of available network-drivers, and network-driver plugins."
+        type: "array"
+        items:
+          type: "string"
+        example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"]
+      Authorization:
+        description: "Names of available authorization plugins."
+        type: "array"
+        items:
+          type: "string"
+        example: ["img-authz-plugin", "hbm"]
+      Log:
+        description: "Names of available logging-drivers, and logging-driver plugins."
+        type: "array"
+        items:
+          type: "string"
+        example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"]
+
+
+  RegistryServiceConfig:
+    description: |
+      RegistryServiceConfig stores daemon registry services configuration.
+    type: "object"
+    x-nullable: true
+    properties:
+      AllowNondistributableArtifactsCIDRs:
+        description: |
+          List of IP ranges to which nondistributable artifacts can be pushed,
+          using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632).
+
+          Some images (for example, Windows base images) contain artifacts
+          whose distribution is restricted by license. When these images are
+          pushed to a registry, restricted artifacts are not included.
+
+          This configuration overrides this behavior, and enables the daemon to
+          push nondistributable artifacts to all registries whose resolved IP
+          address is within the subnet described by the CIDR syntax.
+
+          This option is useful when pushing images containing
+          nondistributable artifacts to a registry on an air-gapped network so
+          hosts on that network can pull the images without connecting to
+          another server.
+
+          > **Warning**: Nondistributable artifacts typically have restrictions
+          > on how and where they can be distributed and shared. Only use this
+          > feature to push artifacts to private registries and ensure that you
+          > are in compliance with any terms that cover redistributing
+          > nondistributable artifacts.
+
+        type: "array"
+        items:
+          type: "string"
+        example: ["::1/128", "127.0.0.0/8"]
+      AllowNondistributableArtifactsHostnames:
+        description: |
+          List of registry hostnames to which nondistributable artifacts can be
+          pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.
+
+          Some images (for example, Windows base images) contain artifacts
+          whose distribution is restricted by license. When these images are
+          pushed to a registry, restricted artifacts are not included.
+
+          This configuration overrides this behavior for the specified
+          registries.
+
+          This option is useful when pushing images containing
+          nondistributable artifacts to a registry on an air-gapped network so
+          hosts on that network can pull the images without connecting to
+          another server.
+
+          > **Warning**: Nondistributable artifacts typically have restrictions
+          > on how and where they can be distributed and shared. Only use this
+          > feature to push artifacts to private registries and ensure that you
+          > are in compliance with any terms that cover redistributing
+          > nondistributable artifacts.
+        type: "array"
+        items:
+          type: "string"
+        example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"]
+      InsecureRegistryCIDRs:
+        description: |
+          List of IP ranges of insecure registries, using the CIDR syntax
+          ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries
+          accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates
+          from unknown CAs) communication.
+
+          By default, local registries (`127.0.0.0/8`) are configured as
+          insecure. All other registries are secure. Communicating with an
+          insecure registry is not possible if the daemon assumes that registry
+          is secure.
+
+          This configuration overrides this behavior, enabling insecure
+          communication with registries whose resolved IP address is within
+          the subnet described by the CIDR syntax.
+
+          Registries can also be marked insecure by hostname. Those registries
+          are listed under `IndexConfigs` and have their `Secure` field set to
+          `false`.
+
+          > **Warning**: Using this option can be useful when running a local
+          > registry, but introduces security vulnerabilities. This option
+          > should therefore ONLY be used for testing purposes. For increased
+          > security, users should add their CA to their system's list of trusted
+          > CAs instead of enabling this option.
+        type: "array"
+        items:
+          type: "string"
+        example: ["::1/128", "127.0.0.0/8"]
+      IndexConfigs:
+        type: "object"
+        additionalProperties:
+          $ref: "#/definitions/IndexInfo"
+        example:
+          "127.0.0.1:5000":
+            "Name": "127.0.0.1:5000"
+            "Mirrors": []
+            "Secure": false
+            "Official": false
+          "[2001:db8:a0b:12f0::1]:80":
+            "Name": "[2001:db8:a0b:12f0::1]:80"
+            "Mirrors": []
+            "Secure": false
+            "Official": false
+          "docker.io":
+            Name: "docker.io"
+            Mirrors: ["https://hub-mirror.corp.example.com:5000/"]
+            Secure: true
+            Official: true
+          "registry.internal.corp.example.com:3000":
+            Name: "registry.internal.corp.example.com:3000"
+            Mirrors: []
+            Secure: false
+            Official: false
+      Mirrors:
+        description: |
+          List of registry URLs that act as a mirror for the official
+          (`docker.io`) registry.
+
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "https://hub-mirror.corp.example.com:5000/"
+          - "https://[2001:db8:a0b:12f0::1]/"
+
+  IndexInfo:
+    description:
+      IndexInfo contains information about a registry.
+    type: "object"
+    x-nullable: true
+    properties:
+      Name:
+        description: |
+          Name of the registry, such as "docker.io".
+        type: "string"
+        example: "docker.io"
+      Mirrors:
+        description: |
+          List of mirrors, expressed as URIs.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "https://hub-mirror.corp.example.com:5000/"
+          - "https://registry-2.docker.io/"
+          - "https://registry-3.docker.io/"
+      Secure:
+        description: |
+          Indicates if the registry is part of the list of insecure
+          registries.
+
+          If `false`, the registry is insecure. Insecure registries accept
+          un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from
+          unknown CAs) communication.
+
+          > **Warning**: Insecure registries can be useful when running a local
+          > registry. However, because its use creates security vulnerabilities
+          > it should ONLY be enabled for testing purposes. For increased
+          > security, users should add their CA to their system's list of
+          > trusted CAs instead of enabling this option.
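The CIDR matching described for `InsecureRegistryCIDRs` comes down to resolving the registry host and testing each resolved address for containment. A hedged Go sketch; the helper is illustrative only, and the daemon's real logic also consults the per-hostname `IndexConfigs` entries:

```go
package main

import (
	"fmt"
	"net"
)

// insecureByCIDR reports whether any resolved address of host falls inside
// one of the configured insecure-registry CIDRs.
func insecureByCIDR(host string, cidrs []string) (bool, error) {
	ips, err := net.LookupIP(host)
	if err != nil {
		return false, err
	}
	for _, c := range cidrs {
		_, ipnet, err := net.ParseCIDR(c)
		if err != nil {
			return false, err
		}
		for _, ip := range ips {
			if ipnet.Contains(ip) {
				return true, nil
			}
		}
	}
	return false, nil
}

func main() {
	// Mirrors the documented defaults: loopback registries are insecure.
	ok, err := insecureByCIDR("localhost", []string{"::1/128", "127.0.0.0/8"})
	fmt.Println(ok, err) // true <nil> on a typical host
}
```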
+        type: "boolean"
+        example: true
+      Official:
+        description: |
+          Indicates whether this is an official registry (i.e., Docker Hub / docker.io)
+        type: "boolean"
+        example: true
+
+  Runtime:
+    description: |
+      Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec)
+      runtime.
+
+      The runtime is invoked by the daemon via the `containerd` daemon. OCI
+      runtimes act as an interface to the Linux kernel namespaces, cgroups,
+      and SELinux.
+    type: "object"
+    properties:
+      path:
+        description: |
+          Name and, optionally, path of the OCI executable binary.
+
+          If the path is omitted, the daemon searches the host's `$PATH` for the
+          binary and uses the first result.
+        type: "string"
+        example: "/usr/local/bin/my-oci-runtime"
+      runtimeArgs:
+        description: |
+          List of command-line arguments to pass to the runtime when invoked.
+        type: "array"
+        x-nullable: true
+        items:
+          type: "string"
+        example: ["--debug", "--systemd-cgroup=false"]
+
+  Commit:
+    description: |
+      Commit holds the Git-commit (SHA1) that a binary was built from, as
+      reported in the version-string of external tools, such as `containerd`,
+      or `runC`.
+    type: "object"
+    properties:
+      ID:
+        description: "Actual commit ID of external tool."
+        type: "string"
+        example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
+      Expected:
+        description: |
+          Commit ID of external tool expected by dockerd as set at build time.
+        type: "string"
+        example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4"
+
+  SwarmInfo:
+    description: |
+      Represents generic information about the swarm.
+    type: "object"
+    properties:
+      NodeID:
+        description: "Unique identifier for this node in the swarm."
+        type: "string"
+        default: ""
+        example: "k67qz4598weg5unwwffg6z1m1"
+      NodeAddr:
+        description: |
+          IP address at which this node can be reached by other nodes in the
+          swarm.
+        type: "string"
+        default: ""
+        example: "10.0.0.46"
+      LocalNodeState:
+        $ref: "#/definitions/LocalNodeState"
+      ControlAvailable:
+        type: "boolean"
+        default: false
+        example: true
+      Error:
+        type: "string"
+        default: ""
+      RemoteManagers:
+        description: |
+          List of IDs and addresses of other managers in the swarm.
+        type: "array"
+        default: null
+        x-nullable: true
+        items:
+          $ref: "#/definitions/PeerNode"
+        example:
+          - NodeID: "71izy0goik036k48jg985xnds"
+            Addr: "10.0.0.158:2377"
+          - NodeID: "79y6h1o4gv8n120drcprv5nmc"
+            Addr: "10.0.0.159:2377"
+          - NodeID: "k67qz4598weg5unwwffg6z1m1"
+            Addr: "10.0.0.46:2377"
+      Nodes:
+        description: "Total number of nodes in the swarm."
+        type: "integer"
+        x-nullable: true
+        example: 4
+      Managers:
+        description: "Total number of managers in the swarm."
+        type: "integer"
+        x-nullable: true
+        example: 3
+      Cluster:
+        $ref: "#/definitions/ClusterInfo"
+
+  LocalNodeState:
+    description: "Current local status of this node."
+    type: "string"
+    default: ""
+    enum:
+      - ""
+      - "inactive"
+      - "pending"
+      - "active"
+      - "error"
+      - "locked"
+    example: "active"
+
+  PeerNode:
+    description: "Represents a peer-node in the swarm"
+    properties:
+      NodeID:
+        description: "Unique identifier for this node in the swarm."
+        type: "string"
+      Addr:
+        description: |
+          IP address and ports at which this node can be reached.
+        type: "string"
+
+  NetworkAttachmentConfig:
+    description: |
+      Specifies how a service should be attached to a particular network.
+    type: "object"
+    properties:
+      Target:
+        description: |
+          The target network for attachment. Must be a network name or ID.
+        type: "string"
+      Aliases:
+        description: |
+          Discoverable alternate names for the service on this network.
+        type: "array"
+        items:
+          type: "string"
+      DriverOpts:
+        description: |
+          Driver attachment options for the network target.
+        type: "object"
+        additionalProperties:
+          type: "string"
+
+paths:
+  /containers/json:
+    get:
+      summary: "List containers"
+      description: |
+        Returns a list of containers. For details on the format, see the
+        [inspect endpoint](#operation/ContainerInspect).
+
+        Note that it uses a different, smaller representation of a container
+        than inspecting a single container. For example, the list of linked
+        containers is not propagated.
+      operationId: "ContainerList"
+      produces:
+        - "application/json"
+      parameters:
+        - name: "all"
+          in: "query"
+          description: |
+            Return all containers. By default, only running containers are shown.
+          type: "boolean"
+          default: false
+        - name: "limit"
+          in: "query"
+          description: |
+            Return this number of most recently created containers, including
+            non-running ones.
+          type: "integer"
+        - name: "size"
+          in: "query"
+          description: |
+            Return the size of container as fields `SizeRw` and `SizeRootFs`.
+          type: "boolean"
+          default: false
+        - name: "filters"
+          in: "query"
+          description: |
+            Filters to process on the container list, encoded as JSON (a
+            `map[string][]string`). For example, `{"status": ["paused"]}` will
+            only return paused containers.
+
+            Available filters:
+
+            - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`)
+            - `before`=(`<container id>` or `<container name>`)
+            - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
+            - `exited=<int>` containers with exit code of `<int>`
+            - `health`=(`starting`|`healthy`|`unhealthy`|`none`)
+            - `id=<ID>` a container's ID
+            - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
+            - `is-task=`(`true`|`false`)
+            - `label=key` or `label="key=value"` of a container label
+            - `name=<name>` a container's name
+            - `network`=(`<network id>` or `<network name>`)
+            - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
+            - `since`=(`<container id>` or `<container name>`)
+            - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`)
+            - `volume`=(`<volume name>` or `<mount point destination>`)
+          type: "string"
+      responses:
+        200:
+          description: "no error"
+          schema:
+            $ref: "#/definitions/ContainerSummary"
+          examples:
+            application/json:
+              - Id: "8dfafdbc3a40"
+                Names:
+                  - "/boring_feynman"
+                Image: "ubuntu:latest"
+                ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+                Command: "echo 1"
+                Created: 1367854155
+                State: "Exited"
+                Status: "Exit 0"
+                Ports:
+                  - PrivatePort: 2222
+                    PublicPort: 3333
+                    Type: "tcp"
+                Labels:
+                  com.example.vendor: "Acme"
+                  com.example.license: "GPL"
+                  com.example.version: "1.0"
+                SizeRw: 12288
+                SizeRootFs: 0
+                HostConfig:
+                  NetworkMode: "default"
+                NetworkSettings:
+                  Networks:
+                    bridge:
+                      NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+                      EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f"
+                      Gateway: "172.17.0.1"
+                      IPAddress: "172.17.0.2"
+                      IPPrefixLen: 16
+                      IPv6Gateway: ""
+                      GlobalIPv6Address: ""
+                      GlobalIPv6PrefixLen: 0
+                      MacAddress: "02:42:ac:11:00:02"
+                Mounts:
+                  - Name: "fac362...80535"
+                    Source: "/data"
+                    Destination: "/data"
+                    Driver: "local"
+                    Mode: "ro,Z"
+                    RW: false
+                    Propagation: ""
+              - Id: "9cd87474be90"
+                Names:
+                  - "/coolName"
+                Image: "ubuntu:latest"
+                ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+                Command: "echo 222222"
+                Created: 1367854155
+                State: "Exited"
+                Status: "Exit 0"
+                Ports: []
+                Labels: {}
+                SizeRw: 12288
+                SizeRootFs: 0
+                HostConfig:
+                  NetworkMode: "default"
+                NetworkSettings:
+                  Networks:
+                    bridge:
+                      NetworkID:
"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. 
However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example; + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + type: "object" + title: "ContainerCreateResponse" + description: "OK response to ContainerCreate operation" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + examples: + application/json: + Id: "e90e34656806" + Warnings: [] + 400: + description: "bad parameter" + schema: + $ref: 
"#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerInspectResponse" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + x-nullable: true + $ref: "#/definitions/ContainerState" + Image: + description: "The container's image ID" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + Platform: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + description: "IDs of exec instances that are running in the container." + type: "array" + items: + type: "string" + x-nullable: true + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriverData" + SizeRw: + description: | + The size of files that have been created or changed by this + container. + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." 
+ type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Healthcheck: + Test: ["CMD-SHELL", "exit 0"] + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + ExecIDs: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + IpcMode: "" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: 
"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + Health: + Status: "healthy" + FailingStreak: 0 + Log: + - Start: "2019-12-22T10:59:05.6385933Z" + End: "2019-12-22T10:59:05.8078452Z" + ExitCode: 0 + Output: "" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerTopResponse" + description: "OK response to ContainerTop operation" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: | + Each process running in the container, where each is process + is an array of values corresponding to the titles. + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. 
+ schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + type: "object" + x-go-name: "ContainerChangeResponseItem" + title: "ContainerChangeResponseItem" + description: "change item in response to ContainerChanges operation" + required: [Path, Kind] + properties: + Path: + description: "Path to file that has changed" + type: "string" + x-nullable: false + Kind: + description: "Kind of change" + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." 
+ operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. + + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + 
percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. + type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-<value>` where `<value>` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`.
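+
+            Tying together the formulas listed above for `/containers/{id}/stats`,
+            here is a minimal Go sketch of the CPU and memory calculations (the
+            struct fields mirror the JSON keys; the sample values are purely
+            illustrative, not normative):
+
+            ```go
+            package main
+
+            import "fmt"
+
+            // Subset of the stats JSON used by the formulas above.
+            type CPUUsage struct {
+                TotalUsage  uint64
+                PercpuUsage []uint64
+            }
+
+            type CPUStats struct {
+                CPUUsage       CPUUsage
+                SystemCPUUsage uint64
+                OnlineCPUs     uint32
+            }
+
+            type MemoryStats struct {
+                Usage uint64
+                Limit uint64
+                Stats map[string]uint64
+            }
+
+            // cpuPercent implements: (cpu_delta / system_cpu_delta) * number_cpus * 100.0
+            func cpuPercent(cur, pre CPUStats) float64 {
+                cpuDelta := float64(cur.CPUUsage.TotalUsage - pre.CPUUsage.TotalUsage)
+                sysDelta := float64(cur.SystemCPUUsage - pre.SystemCPUUsage)
+                numCPUs := float64(cur.OnlineCPUs)
+                if numCPUs == 0 { // fall back to the percpu_usage length, as described above
+                    numCPUs = float64(len(cur.CPUUsage.PercpuUsage))
+                }
+                if sysDelta == 0 {
+                    return 0
+                }
+                return cpuDelta / sysDelta * numCPUs * 100.0
+            }
+
+            // memPercent implements: (usage - stats.cache) / limit * 100.0
+            func memPercent(m MemoryStats) float64 {
+                used := float64(m.Usage - m.Stats["cache"])
+                return used / float64(m.Limit) * 100.0
+            }
+
+            func main() {
+                pre := CPUStats{CPUUsage: CPUUsage{TotalUsage: 100093996}, SystemCPUUsage: 9492140000000, OnlineCPUs: 4}
+                cur := CPUStats{CPUUsage: CPUUsage{TotalUsage: 100215355}, SystemCPUUsage: 9492240000000, OnlineCPUs: 4}
+                fmt.Printf("CPU %%: %.2f\n", cpuPercent(cur, pre))
+                fmt.Printf("MEM %%: %.2f\n", memPercent(MemoryStats{Usage: 6537216, Stats: map[string]uint64{"cache": 0}, Limit: 67108864}))
+            }
+            ```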
+ type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: | + Send a POSIX signal to a container, defaulting to killing to the + container. + operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: | + Change various configuration options of a container without having to + recreate it. + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." 
+ schema: + type: "object" + title: "ContainerUpdateResponse" + description: "OK response to ContainerUpdate operation" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + KernelMemory: 52428800 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. 
You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. + + See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used + for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client + can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will + similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + the stream over the hijacked connection is multiplexed to separate out + `stdout` and `stderr`. The stream consists of a series of frames, each + containing a header and a payload. + + The header indicates which stream the payload belongs to (`stdout` or + `stderr`). It also contains the size of the associated frame encoded in + the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + encoded as big endian. + + Following the header is the payload, which is the specified number of + bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), + the stream is not multiplexed. The data exchanged over the hijacked + connection is simply the raw data from the process PTY and client's + `stdin`.
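+
+          A minimal Go sketch of that demultiplexing loop (illustrative only;
+          `demux` is not part of any API):
+
+          ```go
+          package main
+
+          import (
+              "encoding/binary"
+              "io"
+              "os"
+          )
+
+          // demux reads 8-byte frame headers and copies each payload to
+          // stdout or stderr according to STREAM_TYPE.
+          func demux(r io.Reader) error {
+              var header [8]byte
+              for {
+                  // 1. Read 8 bytes.
+                  if _, err := io.ReadFull(r, header[:]); err != nil {
+                      if err == io.EOF {
+                          return nil
+                      }
+                      return err
+                  }
+                  // 2. Choose stdout or stderr depending on the first byte.
+                  out := os.Stdout
+                  if header[0] == 2 {
+                      out = os.Stderr
+                  }
+                  // 3. Extract the frame size from the last four bytes (big endian).
+                  size := binary.BigEndian.Uint32(header[4:])
+                  // 4. Copy that many payload bytes to the chosen output; 5. repeat.
+                  if _, err := io.CopyN(out, r, int64(size)); err != nil {
+                      return err
+                  }
+              }
+          }
+
+          func main() { _ = demux(os.Stdin) }
+          ```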
+ + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards. + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`. + type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exited."
+ schema: + type: "object" + title: "ContainerWaitResponse" + description: "OK response to ContainerWait operation" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + x-nullable: false + Error: + description: "container waiting error, if any" + type: "object" + properties: + Message: + description: "Details of an error" + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition, either + 'not-running' (default), 'next-exit', or 'removed'. + type: "string" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64-encoded JSON object with some filesystem header information + about the path. + operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64-encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: | + The error message. Either "must specify path parameter" + (path cannot be empty) or "not a directory" (path was + asserted to be a directory but exists as a file).
+ type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." + operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: | + The error message. Either "must specify path parameter" + (path cannot be empty) or "not a directory" (path was + asserted to be a directory but exists as a file). + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. + type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the dest file or + dir + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. 
+ schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + examples: + application/json: + - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + ParentId: "" + RepoTags: + - "ubuntu:12.04" + - "ubuntu:precise" + RepoDigests: + - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" + Created: 1474925151 + Size: 103579269 + VirtualSize: 103579269 + SharedSize: 0 + Labels: {} + Containers: 2 + - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" + ParentId: "" + RepoTags: + - "ubuntu:12.10" + - "ubuntu:quantal" + RepoDigests: + - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" + - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" + Created: 1403128455 + Size: 172064416 + VirtualSize: 172064416 + SharedSize: 0 + Labels: {} + Containers: 5 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. + + Available filters: + + - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`<image-name>[:<tag>]`) + - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + type: "string" + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive.
It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." 
+ type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. + Any other value is taken as a custom network's name or ID to which this + container should connect to. + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. 
+ type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: "BuildKit output configuration" + type: "string" + default: "" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "keep-storage" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') + - `id=` + - `parent=` + - `type=` + - `description=` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." 
+ type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Image" + examples: + application/json: + Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" + Comment: "" + Os: "linux" + Architecture: "amd64" + Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + ContainerConfig: + Tty: false + Hostname: "e611e15f9c9d" + Domainname: "" + AttachStdout: false + PublishService: "" + AttachStdin: false + OpenStdin: false + StdinOnce: false + NetworkDisabled: false + OnBuild: [] + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + User: "" + WorkingDir: "" + MacAddress: "" + AttachStderr: false + Labels: + com.example.license: "GPL" + com.example.version: "1.0" + com.example.vendor: "Acme" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + - "/bin/sh" + - "-c" + - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + DockerVersion: "1.9.0-dev" + VirtualSize: 188359297 + Size: 0 + Author: "" + Created: "2015-09-10T08:30:53.26995814Z" + GraphDriver: + Name: "aufs" + Data: {} + RepoDigests: + - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + RepoTags: + - "example:1.0" + - "example:latest" + - "example:stable" + Config: + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + NetworkDisabled: false + OnBuild: [] + StdinOnce: false + PublishService: "" + AttachStdin: false + OpenStdin: false + Domainname: "" + AttachStdout: false + Tty: false + Hostname: "e611e15f9c9d" + Cmd: + - "/bin/bash" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Labels: + com.example.vendor: "Acme" + com.example.version: "1.0" + com.example.license: "GPL" + MacAddress: "" + AttachStderr: false + WorkingDir: "" + User: "" + RootFS: + Type: "layers" + Layers: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." 
+ operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-automated=(true|false)` + - `is-official=(true|false)` + - `stars=` Matches images that has at least 'number' stars. 
+ type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
+ operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
+ + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + Action: + description: "The type of event" + type: "string" + Actor: + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + Attributes: + description: "Various key/value attributes of the object, depending on its type" + type: "object" + additionalProperties: + type: "string" + time: + description: "Timestamp of event" + type: "integer" + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + examples: + application/json: + Type: "container" + Action: "create" + Actor: + ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + com.example.some-label: "some-label-value" + image: "alpine" + name: "my-container" + time: 1461943101 + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters:
+
+ - `config=<string>` config name or ID
+ - `container=<string>` container name or ID
+ - `daemon=<string>` daemon name or ID
+ - `event=<string>` event type
+ - `image=<string>` image name or ID
+ - `label=<string>` image or container label
+ - `network=<string>` network name or ID
+ - `node=<string>` node ID
+ - `plugin`=<string> plugin name or ID
+ - `scope`=<string> local or swarm
+ - `secret=<string>` secret name or ID
+ - `service=<string>` service name or ID
+ - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config`
+ - `volume=<string>` volume name
+ type: "string"
+ tags: ["System"]
+ /system/df:
+ get:
+ summary: "Get data usage information"
+ operationId: "SystemDataUsage"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ title: "SystemDataUsageResponse"
+ properties:
+ LayersSize:
+ type: "integer"
+ format: "int64"
+ Images:
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageSummary"
+ Containers:
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerSummary"
+ Volumes:
+ type: "array"
+ items:
+ $ref: "#/definitions/Volume"
+ BuildCache:
+ type: "array"
+ items:
+ $ref: "#/definitions/BuildCache"
+ example:
+ LayersSize: 1092588
+ Images:
+ -
+ Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
+ ParentId: ""
+ RepoTags:
+ - "busybox:latest"
+ RepoDigests:
+ - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6"
+ Created: 1466724217
+ Size: 1092588
+ SharedSize: 0
+ VirtualSize: 1092588
+ Labels: {}
+ Containers: 1
+ Containers:
+ -
+ Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148"
+ Names:
+ - "/top"
+ Image: "busybox"
+ ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
+ Command: "top"
+ Created: 1472592424
+ Ports: []
+ SizeRootFs: 1092588
+ Labels: {}
+ State: "exited"
+ Status: "Exited (0) 56 minutes ago"
+ HostConfig:
+ NetworkMode: "default"
+ NetworkSettings:
+ Networks:
+ bridge:
+ IPAMConfig: null
+ Links: null
+ Aliases: null
+ NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92"
+ EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a"
+ Gateway: "172.18.0.1"
+ IPAddress: "172.18.0.2"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:12:00:02"
+ Mounts: []
+ Volumes:
+ -
+ Name: "my-volume"
+ Driver: "local"
+ Mountpoint: "/var/lib/docker/volumes/my-volume/_data"
+ Labels: null
+ Scope: "local"
+ Options: null
+ UsageData:
+ Size: 10920104
+ RefCount: 2
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["System"]
+ /images/{name}/get:
+ get:
+ summary: "Export an image"
+ description: |
+ Get a tarball containing all images and metadata for a repository.
+
+ If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced.
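`GET /events` above holds the connection open and writes one JSON object per event, so a client should decode in a loop rather than buffering the whole body. A minimal sketch against the same assumed `localhost:2375` endpoint:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// event mirrors the fields of SystemEventsResponse that this sketch prints.
type event struct {
	Type   string `json:"Type"`
	Action string `json:"Action"`
	Actor  struct {
		ID         string            `json:"ID"`
		Attributes map[string]string `json:"Attributes"`
	} `json:"Actor"`
}

func main() {
	resp, err := http.Get("http://localhost:2375/events") // assumed address
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Events arrive one JSON object at a time for as long as the
	// connection stays open, so decode until the stream ends.
	dec := json.NewDecoder(resp.Body)
	for {
		var ev event
		if err := dec.Decode(&ev); err != nil {
			fmt.Println("stream ended:", err)
			return
		}
		fmt.Printf("%s %s %s\n", ev.Type, ev.Action, ev.Actor.ID)
	}
}
```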
+ + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." 
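The image export above returns the raw tarball as the response body, so the natural client treats it as a stream. A minimal sketch that saves an image to disk, assuming the same `localhost:2375` endpoint and using `busybox:latest` as a placeholder name:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// GET /images/{name}/get streams the tarball described above.
	resp, err := http.Get("http://localhost:2375/images/busybox:latest/get")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		panic(resp.Status)
	}

	out, err := os.Create("busybox.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// Stream straight to the file rather than buffering in memory.
	n, err := io.Copy(out, resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("wrote %d bytes to busybox.tar\n", n)
}
```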
+ operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-` where `` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container. + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: + Detach: false + Tty: false + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. 
+ operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." + examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + type: "object" + title: "VolumeListResponse" + description: "Volume list response" + required: [Volumes, Warnings] + properties: + Volumes: + type: "array" + x-nullable: false + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + x-nullable: false + description: | + Warnings that occurred when fetching the list of volumes. + items: + type: "string" + + examples: + application/json: + Volumes: + - CreatedAt: "2017-07-19T12:00:26Z" + Name: "tardis" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + Options: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Warnings: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. 
When set to `false`
+ (or `0`), only volumes that are in use by one or more
+ containers are returned.
+ - `driver=<volume-driver-name>` Matches volumes based on their driver.
+ - `label=<key>` or `label=<key>:<value>` Matches volumes based on
+ the presence of a `label` alone or a `label` and a value.
+ - `name=<volume-name>` Matches all or part of a volume name.
+ type: "string"
+ format: "json"
+ tags: ["Volume"]
+
+ /volumes/create:
+ post:
+ summary: "Create a volume"
+ operationId: "VolumeCreate"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 201:
+ description: "The volume was created successfully"
+ schema:
+ $ref: "#/definitions/Volume"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "volumeConfig"
+ in: "body"
+ required: true
+ description: "Volume configuration"
+ schema:
+ type: "object"
+ description: "Volume configuration"
+ title: "VolumeConfig"
+ properties:
+ Name:
+ description: |
+ The new volume's name. If not specified, Docker generates a name.
+ type: "string"
+ x-nullable: false
+ Driver:
+ description: "Name of the volume driver to use."
+ type: "string"
+ default: "local"
+ x-nullable: false
+ DriverOpts:
+ description: |
+ A mapping of driver options and values. These options are
+ passed directly to the driver and are driver specific.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ Name: "tardis"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Driver: "custom"
+ tags: ["Volume"]
+
+ /volumes/{name}:
+ get:
+ summary: "Inspect a volume"
+ operationId: "VolumeInspect"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/Volume"
+ 404:
+ description: "No such volume"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ required: true
+ description: "Volume name or ID"
+ type: "string"
+ tags: ["Volume"]
+
+ delete:
+ summary: "Remove a volume"
+ description: "Instruct the driver to remove the volume."
+ operationId: "VolumeDelete"
+ responses:
+ 204:
+ description: "The volume was removed"
+ 404:
+ description: "No such volume or volume driver"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "Volume is in use and cannot be removed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ required: true
+ description: "Volume name or ID"
+ type: "string"
+ - name: "force"
+ in: "query"
+ description: "Force the removal of the volume"
+ type: "boolean"
+ default: false
+ tags: ["Volume"]
+ /volumes/prune:
+ post:
+ summary: "Delete unused volumes"
+ produces:
+ - "application/json"
+ operationId: "VolumePrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.
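`POST /volumes/create` above takes the VolumeConfig body and answers with the created Volume. A minimal sketch, with the same assumed `localhost:2375` endpoint and placeholder name and labels:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Volume name and label are placeholders for this sketch.
	payload, err := json.Marshal(map[string]any{
		"Name":   "tardis",
		"Driver": "local",
		"Labels": map[string]string{"com.example.some-label": "some-value"},
	})
	if err != nil {
		panic(err)
	}

	resp, err := http.Post("http://localhost:2375/volumes/create",
		"application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode just the fields of the Volume definition we want to show.
	var vol struct {
		Name       string `json:"Name"`
		Mountpoint string `json:"Mountpoint"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&vol); err != nil {
		panic(err)
	}
	fmt.Printf("created %s at %s (%s)\n", vol.Name, vol.Mountpoint, resp.Status)
}
```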
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than + inspecting a single network. For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + title: "NetworkCreateResponse" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: | + Check for networks with duplicate names. Since Network is + primarily keyed based on a random ID and not on the name, and + network name is strictly a user-friendly alias to the network + which is uniquely identified using ID, there is no guaranteed + way to check for duplicates. CheckDuplicate is there to provide + a best effort checking of any networks which has the same name + but it is not guaranteed to catch all name collisions. + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." 
+ type: "boolean" + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Attachable: false + Ingress: false + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: | + The ID or name of the container to disconnect from the network. + Force: + type: "boolean" + description: | + Force the container to disconnect from the network. + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. 
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels.
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "NetworkPruneResponse"
+ properties:
+ NetworksDeleted:
+ description: "Networks that were deleted"
+ type: "array"
+ items:
+ type: "string"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Network"]
+ /plugins:
+ get:
+ summary: "List plugins"
+ operationId: "PluginList"
+ description: "Returns information about installed plugins."
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Plugin"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the plugin list.
+
+ Available filters:
+
+ - `capability=<capability name>`
+ - `enable=<true>|<false>`
+ tags: ["Plugin"]
+
+ /plugins/privileges:
+ get:
+ summary: "Get plugin privileges"
+ operationId: "GetPluginPrivileges"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ description: |
+ Describes a permission the user has to accept upon installing
+ the plugin.
+ type: "object"
+ title: "PluginPrivilegeItem"
+ properties:
+ Name:
+ type: "string"
+ Description:
+ type: "string"
+ Value:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - Name: "network"
+ Description: ""
+ Value:
+ - "host"
+ - Name: "mount"
+ Description: ""
+ Value:
+ - "/data"
+ - Name: "device"
+ Description: ""
+ Value:
+ - "/dev/cpu_dma_latency"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "remote"
+ in: "query"
+ description: |
+ The name of the plugin. The `:latest` tag is optional, and is the
+ default if omitted.
+ required: true
+ type: "string"
+ tags:
+ - "Plugin"
+
+ /plugins/pull:
+ post:
+ summary: "Install a plugin"
+ operationId: "PluginPull"
+ description: |
+ Pulls and installs a plugin. After the plugin is installed, it can be
+ enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable).
+ produces:
+ - "application/json"
+ responses:
+ 204:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "remote"
+ in: "query"
+ description: |
+ Remote reference for plugin to install.
+
+ The `:latest` tag is optional, and is used as the default if omitted.
+ required: true
+ type: "string"
+ - name: "name"
+ in: "query"
+ description: |
+ Local name for the pulled plugin.
+
+ The `:latest` tag is optional, and is used as the default if omitted.
+ required: false
+ type: "string"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: |
+ A base64url-encoded auth configuration to use when pulling a plugin
+ from a registry.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
+ type: "string"
+ - name: "body"
+ in: "body"
+ schema:
+ type: "array"
+ items:
+ description: |
+ Describes a permission accepted by the user upon installing the
+ plugin.
+ type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. + + The `:latest` tag is optional, and is used as the default if omitted. 
+ required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: | + Describes a permission accepted by the user upon installing the + plugin. + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+
+ Available filters:
+ - `id=<node id>`
+ - `label=<engine label>`
+ - `membership=`(`accepted`|`pending`)`
+ - `name=<node name>`
+ - `node.label=<node label>`
+ - `role=`(`manager`|`worker`)`
+ type: "string"
+ tags: ["Node"]
+ /nodes/{id}:
+ get:
+ summary: "Inspect a node"
+ operationId: "NodeInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Node"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the node"
+ type: "string"
+ required: true
+ tags: ["Node"]
+ delete:
+ summary: "Delete a node"
+ operationId: "NodeDelete"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the node"
+ type: "string"
+ required: true
+ - name: "force"
+ in: "query"
+ description: "Force remove a node from the swarm"
+ default: false
+ type: "boolean"
+ tags: ["Node"]
+ /nodes/{id}/update:
+ post:
+ summary: "Update a node"
+ operationId: "NodeUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID of the node"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/NodeSpec"
+ - name: "version"
+ in: "query"
+ description: |
+ The version number of the node object being updated. This is required
+ to avoid conflicting writes.
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same addres + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + title: "ServiceCreateResponse" + properties: + ID: + description: "The ID of the created service." + type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
+ operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + 
Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=` + - `label=key` or `label="key=value"` + - `name=` + - `node=` + - `service=` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values. 
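
(Editor's illustration; not part of the vendored diff.) The labels-only update contract described above, driven through the Go client under the same assumptions; the `environment` label is an arbitrary example:

```go
// Illustrative sketch only, not part of this diff: create a secret, then
// perform the labels-only update with the current version echoed back.
package main

import (
	"context"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// POST /secrets/create. Data is raw bytes here; it is base64-encoded
	// on the wire.
	created, err := cli.SecretCreate(ctx, swarm.SecretSpec{
		Annotations: swarm.Annotations{
			Name:   "app-key.crt",
			Labels: map[string]string{"foo": "bar"},
		},
		Data: []byte("THIS IS NOT A REAL CERTIFICATE\n"),
	})
	if err != nil {
		panic(err)
	}

	// GET /secrets/{id}: everything except Labels must be resubmitted
	// unchanged, together with the current version.
	secret, _, err := cli.SecretInspectWithRaw(ctx, created.ID)
	if err != nil {
		panic(err)
	}
	spec := secret.Spec
	spec.Labels["environment"] = "dev"

	// POST /secrets/{id}/update?version=...
	if err := cli.SecretUpdate(ctx, secret.ID, secret.Version, spec); err != nil {
		panic(err)
	}
}
```
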
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: + 
$ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: | + The spec of the config to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [ConfigInspect endpoint](#operation/ConfigInspect) response values. + - name: "version" + in: "query" + description: | + The version number of the config object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: | + Return image digest and platform information by contacting the registry. + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + properties: + Descriptor: + type: "object" + description: | + A descriptor struct containing digest, media type, and size. + properties: + MediaType: + type: "string" + Size: + type: "integer" + format: "int64" + Digest: + type: "string" + URLs: + type: "array" + items: + type: "string" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + OSVersion: + type: "string" + OSFeatures: + type: "array" + items: + type: "string" + Variant: + type: "string" + Features: + type: "array" + items: + type: "string" + examples: + application/json: + Descriptor: + MediaType: "application/vnd.docker.distribution.manifest.v2+json" + Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + Size: 3987495 + URLs: + - "" + Platforms: + - Architecture: "amd64" + OS: "linux" + OSVersion: "" + OSFeatures: + - "" + Variant: "" + Features: + - "" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to + call back to the client for advanced capabilities. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows + the client to expose gPRC services on that connection. 
+ + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon responds with a `101 UPGRADED` response followed by + the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session"] diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go new file mode 100644 index 00000000000..ddf15bb182d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/auth.go @@ -0,0 +1,22 @@ +package types // import "github.com/docker/docker/api/types" + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go new file mode 100644 index 00000000000..bf3463b90e7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev // import "github.com/docker/docker/api/types/blkiodev" + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go new file mode 100644 index 00000000000..9c464b73e25 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -0,0 +1,419 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "bufio" + "io" + "net" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + units "github.com/docker/go-units" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir
string +} + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string `json:"ID"` + ContainerID string + Running bool + ExitCode int + Pid int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerStartOptions holds parameters to start containers. +type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// CloseWriter is an interface implemented by structs +// that can close input streams to prevent writing. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes a readWriter for writing. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info.
+ BuildArgs map[string]*string + AuthConfigs map[string]AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string + SessionID string + Platform string + // Version specifies the version of the underlying builder to use + Version BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string + // Outputs defines configurations for exporting build results. Only supported + // in BuildKit mode + Outputs []ImageBuildOutput +} + +// ImageBuildOutput defines configuration for exporting a build result +type ImageBuildOutput struct { + Type string + Attrs map[string]string +} + +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit BuilderVersion = "2" +) + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. + Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. +} + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform string // Platform is the target platform of the image +} + +// ImageListOptions holds parameters to filter the list of images with. +type ImageListOptions struct { + All bool + Filters filters.Args +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc + Platform string +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error.
+// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) + +// ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args + Limit int +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + Height uint + Width uint +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceListOptions holds parameters to list services with. 
+type ServiceListOptions struct { + Filters filters.Args + + // Status indicates whether the server should include the service task + // count of running and desired tasks. + Status bool +} + +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. +type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions holds all options for plugin create. +type PluginCreateOptions struct { + RepoName string +} diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go new file mode 100644 index 00000000000..3dd133a3a58 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -0,0 +1,66 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + Platform *specs.Platform + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} + +// PluginRmConfig holds arguments for plugin remove.
+type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. +type PluginDisableConfig struct { + ForceDisable bool +} + +// NetworkListConfig stores the options available for listing networks +type NetworkListConfig struct { + // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here + Detailed bool + Verbose bool +} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go new file mode 100644 index 00000000000..f767195b94b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -0,0 +1,69 @@ +package container // import "github.com/docker/docker/api/types/container" + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// MinimumDuration puts a minimum on user-configured durations. +// This is to prevent API error on time unit. For example, API may +// set 3 as healthcheck interval with intention of 3 seconds, but +// Docker interprets it as 3 nanoseconds. +const MinimumDuration = 1 * time.Millisecond + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the first attached client disconnects.
+ Env []string // List of environment variables to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) where the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that was defined on the image Dockerfile + Labels map[string]string // List of labels set on this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go new file mode 100644 index 00000000000..16dd5019eef --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_changes.go @@ -0,0 +1,20 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerChangeResponseItem change item in response to ContainerChanges operation +// swagger:model ContainerChangeResponseItem +type ContainerChangeResponseItem struct { + + // Kind of change + // Required: true + Kind uint8 `json:"Kind"` + + // Path to file that has changed + // Required: true + Path string `json:"Path"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go new file mode 100644 index 00000000000..d0c852f84d5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -0,0 +1,20 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT.
+// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerCreateCreatedBody OK response to ContainerCreate operation +// swagger:model ContainerCreateCreatedBody +type ContainerCreateCreatedBody struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go new file mode 100644 index 00000000000..63381da3674 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_top.go @@ -0,0 +1,22 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerTopOKBody OK response to ContainerTop operation +// swagger:model ContainerTopOKBody +type ContainerTopOKBody struct { + + // Each process running in the container, where each process + // is an array of values corresponding to the titles. + // + // Required: true + Processes [][]string `json:"Processes"` + + // The ps column titles + // Required: true + Titles []string `json:"Titles"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go new file mode 100644 index 00000000000..c10f175ea82 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -0,0 +1,16 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerUpdateOKBody OK response to ContainerUpdate operation +// swagger:model ContainerUpdateOKBody +type ContainerUpdateOKBody struct { + + // warnings + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go new file mode 100644 index 00000000000..49e05ae6694 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -0,0 +1,28 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT.
+// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerWaitOKBodyError container waiting error, if any +// swagger:model ContainerWaitOKBodyError +type ContainerWaitOKBodyError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} + +// ContainerWaitOKBody OK response to ContainerWait operation +// swagger:model ContainerWaitOKBody +type ContainerWaitOKBody struct { + + // error + // Required: true + Error *ContainerWaitOKBodyError `json:"Error"` + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go new file mode 100644 index 00000000000..2d1cbaa9abd --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -0,0 +1,447 @@ +package container // import "github.com/docker/docker/api/types/container" + +import ( + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" + units "github.com/docker/go-units" +) + +// CgroupnsMode represents the cgroup namespace mode of the container +type CgroupnsMode string + +// IsPrivate indicates whether the container uses its own private cgroup namespace +func (c CgroupnsMode) IsPrivate() bool { + return c == "private" +} + +// IsHost indicates whether the container shares the host's cgroup namespace +func (c CgroupnsMode) IsHost() bool { + return c == "host" +} + +// IsEmpty indicates whether the container cgroup namespace mode is unset +func (c CgroupnsMode) IsEmpty() bool { + return c == "" +} + +// Valid indicates whether the cgroup namespace mode is valid +func (c CgroupnsMode) Valid() bool { + return c.IsEmpty() || c.IsPrivate() || c.IsHost() +} + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i Isolation) IsDefault() bool { + return strings.ToLower(string(i)) == "default" || string(i) == "" +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +const ( + // IsolationEmpty is unspecified (same behavior as default) + IsolationEmpty = Isolation("") + // IsolationDefault is the default isolation mode on current daemon + IsolationDefault = Isolation("default") + // IsolationProcess is process isolation mode + IsolationProcess = Isolation("process") + // IsolationHyperV is HyperV isolation mode + IsolationHyperV = Isolation("hyperv") +) + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. +func (n IpcMode) IsPrivate() bool { + return n == "private" +} + +// IsHost indicates whether the container shares the host's ipc namespace. 
+func (n IpcMode) IsHost() bool { + return n == "host" +} + +// IsShareable indicates whether the container's ipc namespace can be shared with another container. +func (n IpcMode) IsShareable() bool { + return n == "shareable" +} + +// IsContainer indicates whether the container uses another container's ipc namespace. +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// IsNone indicates whether container IpcMode is set to "none". +func (n IpcMode) IsNone() bool { + return n == "none" +} + +// IsEmpty indicates whether container IpcMode is empty +func (n IpcMode) IsEmpty() bool { + return n == "" +} + +// Valid indicates whether the ipc mode is valid. +func (n IpcMode) Valid() bool { + return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() +} + +// Container returns the name of the container whose ipc stack is going to be used. +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 && parts[0] == "container" { + return parts[1] + } + return "" +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// ConnectedContainer is the id of the container whose network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// UsernsMode represents userns mode in the container. +type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses a private userns. +func (n UsernsMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container cgroup +func (c CgroupSpec) IsContainer() bool { + parts := strings.SplitN(string(c), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + return c.IsContainer() || c == "" +} + +// Container returns the name of the container whose cgroup will be used.
+func (c CgroupSpec) Container() string { + parts := strings.SplitN(string(c), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid namespace of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// DeviceRequest represents a request for devices from a device driver. +// Used by GPU device drivers. +type DeviceRequest struct { + Driver string // Name of device driver + Count int // Number of devices to request (-1 = All) + DeviceIDs []string // List of device IDs as recognizable by the device driver + Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") + Options map[string]string // Options to pass onto the device driver +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" || rp.Name == "" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart if it exits with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == "on-failure" +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy.
This means the container will + // automatically restart unless the user has put it into a stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + +// IsSame compares two RestartPolicy to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// LogMode is a type to define the available modes for logging. +// These modes affect how logs are handled when log messages start piling up. +type LogMode string + +// Available logging modes +const ( + LogModeUnset = "" + LogModeBlocking LogMode = "blocking" + LogModeNonBlock LogMode = "non-blocking" +) + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Resources contains container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs. + + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup. + BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) + BlkioWeightDevice []*blkiodev.WeightDevice + BlkioDeviceReadBps []*blkiodev.ThrottleDevice + BlkioDeviceWriteBps []*blkiodev.ThrottleDevice + BlkioDeviceReadIOps []*blkiodev.ThrottleDevice + BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + DeviceCgroupRules []string // List of rules to be added to the device cgroup + DeviceRequests []DeviceRequest // List of device requests for device drivers + KernelMemory int64 // Kernel memory limit (in bytes), Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. + Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // Applicable to Windows + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive +} + +// UpdateConfig holds the mutable attributes of a Container. +// Those attributes can be updated at runtime. +type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig is the non-portable Config structure of a container. +// Here, "non-portable" means "dependent on the host we are running on".
+// HostConfig is the non-portable Config structure of a container.
+// Here, "non-portable" means "dependent of the host we are running on".
+// Portable information *should* appear in Config.
+type HostConfig struct {
+	// Applicable to all platforms
+	Binds           []string      // List of volume bindings for this container
+	ContainerIDFile string        // File (path) where the containerId is written
+	LogConfig       LogConfig     // Configuration of the logs for this container
+	NetworkMode     NetworkMode   // Network mode to use for the container
+	PortBindings    nat.PortMap   // Port mapping between the exposed port (container) and the host
+	RestartPolicy   RestartPolicy // Restart policy to be used for the container
+	AutoRemove      bool          // Automatically remove container when it exits
+	VolumeDriver    string        // Name of the volume driver used to mount volumes
+	VolumesFrom     []string      // List of volumes to take from other containers
+
+	// Applicable to UNIX platforms
+	CapAdd          strslice.StrSlice // List of kernel capabilities to add to the container
+	CapDrop         strslice.StrSlice // List of kernel capabilities to remove from the container
+	CgroupnsMode    CgroupnsMode      // Cgroup namespace mode to use for the container
+	DNS             []string          `json:"Dns"`        // List of DNS servers to use
+	DNSOptions      []string          `json:"DnsOptions"` // List of DNS options
+	DNSSearch       []string          `json:"DnsSearch"`  // List of DNS search domains
+	ExtraHosts      []string          // List of extra hosts
+	GroupAdd        []string          // List of additional groups that the container process will run as
+	IpcMode         IpcMode           // IPC namespace to use for the container
+	Cgroup          CgroupSpec        // Cgroup to use for the container
+	Links           []string          // List of links (in the name:alias form)
+	OomScoreAdj     int               // Container preference for OOM-killing
+	PidMode         PidMode           // PID namespace to use for the container
+	Privileged      bool              // Whether the container is in privileged mode
+	PublishAllPorts bool              // Whether docker should publish all exposed ports for the container
+	ReadonlyRootfs  bool              // Whether the container root filesystem is read-only
+	SecurityOpt     []string          // List of string values to customize labels for MLS systems, such as SELinux.
+	StorageOpt      map[string]string `json:",omitempty"` // Storage driver options per container.
+	Tmpfs           map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
+	UTSMode         UTSMode           // UTS namespace to use for the container
+	UsernsMode      UsernsMode        // The user namespace to use for the container
+	ShmSize         int64             // Total shm memory usage
+	Sysctls         map[string]string `json:",omitempty"` // List of namespaced sysctls used for the container
+	Runtime         string            `json:",omitempty"` // Runtime to use with this container
+
+	// Applicable to Windows
+	ConsoleSize [2]uint   // Initial console size (height,width)
+	Isolation   Isolation // Isolation technology of the container (e.g. default, hyperv)
+
+	// Contains container's resources (cgroups, ulimits)
+	Resources
+
+	// Mounts specs used by the container
+	Mounts []mount.Mount `json:",omitempty"`
+
+	// MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths)
+	MaskedPaths []string
+
+	// ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths)
+	ReadonlyPaths []string
+
+	// Run a custom init inside the container, if null, use the daemon's configured settings
+	Init *bool `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
new file mode 100644
index 00000000000..24c4fa8d900
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
@@ -0,0 +1,42 @@
+//go:build !windows
+// +build !windows
+
+package container // import "github.com/docker/docker/api/types/container"
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+	return i.IsDefault()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+	if n.IsBridge() {
+		return "bridge"
+	} else if n.IsHost() {
+		return "host"
+	} else if n.IsContainer() {
+		return "container"
+	} else if n.IsNone() {
+		return "none"
+	} else if n.IsDefault() {
+		return "default"
+	} else if n.IsUserDefined() {
+		return n.UserDefined()
+	}
+	return ""
+}
+
+// IsBridge indicates whether the container uses the bridge network stack
+func (n NetworkMode) IsBridge() bool {
+	return n == "bridge"
+}
+
+// IsHost indicates whether the container uses the host network stack.
+func (n NetworkMode) IsHost() bool {
+	return n == "host"
+}
+
+// IsUserDefined indicates a user-created network
+func (n NetworkMode) IsUserDefined() bool {
+	return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
new file mode 100644
index 00000000000..99f803a5bb1
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
@@ -0,0 +1,40 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+// IsBridge indicates whether the container uses the bridge network stack;
+// on Windows it is given the name NAT.
+func (n NetworkMode) IsBridge() bool {
+	return n == "nat"
+}
+
+// IsHost indicates whether the container uses the host network stack.
+// It returns false, as this is not supported on Windows.
+func (n NetworkMode) IsHost() bool {
+	return false
+}
+
+// IsUserDefined indicates a user-created network
+func (n NetworkMode) IsUserDefined() bool {
+	return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
+}
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+	return i.IsDefault() || i.IsHyperV() || i.IsProcess()
+}
+
+// NetworkName returns the name of the network stack.
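+//
+// A reviewer sketch of the mapping on Windows (added for clarity; not
+// upstream documentation):
+//
+//	NetworkMode("default").NetworkName()      // "default"
+//	NetworkMode("nat").NetworkName()          // "nat" (IsBridge on Windows)
+//	NetworkMode("container:db").NetworkName() // "container"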
+func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsContainer() { + return "container" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go new file mode 100644 index 00000000000..cd8311f99cf --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container // import "github.com/docker/docker/api/types/container" + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go new file mode 100644 index 00000000000..dc942d9d9ef --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. 
+	// Required: true
+	Message string `json:"message"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go
new file mode 100644
index 00000000000..f84f034cd54
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/error_response_ext.go
@@ -0,0 +1,6 @@
+package types
+
+// Error returns the error message
+func (e ErrorResponse) Error() string {
+	return e.Message
+}
diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go
new file mode 100644
index 00000000000..aa8fba81548
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/events/events.go
@@ -0,0 +1,54 @@
+package events // import "github.com/docker/docker/api/types/events"
+
+const (
+	// BuilderEventType is the event type that the builder generates
+	BuilderEventType = "builder"
+	// ContainerEventType is the event type that containers generate
+	ContainerEventType = "container"
+	// DaemonEventType is the event type that the daemon generates
+	DaemonEventType = "daemon"
+	// ImageEventType is the event type that images generate
+	ImageEventType = "image"
+	// NetworkEventType is the event type that networks generate
+	NetworkEventType = "network"
+	// PluginEventType is the event type that plugins generate
+	PluginEventType = "plugin"
+	// VolumeEventType is the event type that volumes generate
+	VolumeEventType = "volume"
+	// ServiceEventType is the event type that services generate
+	ServiceEventType = "service"
+	// NodeEventType is the event type that nodes generate
+	NodeEventType = "node"
+	// SecretEventType is the event type that secrets generate
+	SecretEventType = "secret"
+	// ConfigEventType is the event type that configs generate
+	ConfigEventType = "config"
+)
+
+// Actor describes something that generates events,
+// like a container, or a network, or a volume.
+// It has a defined name and a set of attributes.
+// The container attributes are its labels; other actors
+// can generate these attributes from other properties.
+type Actor struct {
+	ID         string
+	Attributes map[string]string
+}
+
+// Message represents the information an event contains
+type Message struct {
+	// Deprecated information from JSONMessage.
+	// With data only in container events.
+	Status string `json:"status,omitempty"`
+	ID     string `json:"id,omitempty"`
+	From   string `json:"from,omitempty"`
+
+	Type   string
+	Action string
+	Actor  Actor
+	// Engine events are local scope. Cluster events are swarm scope.
+	Scope string `json:"scope,omitempty"`
+
+	Time     int64 `json:"time,omitempty"`
+	TimeNano int64 `json:"timeNano,omitempty"`
+}
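Reviewer note (not part of the vendored files): a runnable sketch of an event built from the types above; the ID and name are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/events"
)

func main() {
	msg := events.Message{
		Type:   events.ContainerEventType,
		Action: "start",
		Actor: events.Actor{
			ID:         "abc123",                         // illustrative container ID
			Attributes: map[string]string{"name": "web"}, // container labels/name
		},
		Scope:    "local",
		TimeNano: time.Now().UnixNano(),
	}
	fmt.Println(msg.Type, msg.Action, msg.Actor.Attributes["name"])
}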
diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go
new file mode 100644
index 00000000000..b4976a34717
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/filters/parse.go
@@ -0,0 +1,322 @@
+/*
+Package filters provides tools for encoding a mapping of keys to a set of
+multiple values.
+*/
+package filters // import "github.com/docker/docker/api/types/filters"
+
+import (
+	"encoding/json"
+	"regexp"
+	"strings"
+
+	"github.com/docker/docker/api/types/versions"
+)
+
+// Args stores a mapping of keys to a set of multiple values.
+type Args struct {
+	fields map[string]map[string]bool
+}
+
+// KeyValuePair is used to initialize a new Args
+type KeyValuePair struct {
+	Key   string
+	Value string
+}
+
+// Arg creates a new KeyValuePair for initializing Args
+func Arg(key, value string) KeyValuePair {
+	return KeyValuePair{Key: key, Value: value}
+}
+
+// NewArgs returns a new Args populated with the initial args
+func NewArgs(initialArgs ...KeyValuePair) Args {
+	args := Args{fields: map[string]map[string]bool{}}
+	for _, arg := range initialArgs {
+		args.Add(arg.Key, arg.Value)
+	}
+	return args
+}
+
+// Keys returns all the keys in the list of Args
+func (args Args) Keys() []string {
+	keys := make([]string, 0, len(args.fields))
+	for k := range args.fields {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// MarshalJSON returns a JSON byte representation of the Args
+func (args Args) MarshalJSON() ([]byte, error) {
+	if len(args.fields) == 0 {
+		return []byte("{}"), nil
+	}
+	return json.Marshal(args.fields)
+}
+
+// ToJSON returns the Args as a JSON encoded string
+func ToJSON(a Args) (string, error) {
+	if a.Len() == 0 {
+		return "", nil
+	}
+	buf, err := json.Marshal(a)
+	return string(buf), err
+}
+
+// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
+// then the encoded format will use an older legacy format where the values are a
+// list of strings, instead of a set.
+//
+// Deprecated: do not use in any new code; use ToJSON instead
+func ToParamWithVersion(version string, a Args) (string, error) {
+	if a.Len() == 0 {
+		return "", nil
+	}
+
+	if version != "" && versions.LessThan(version, "1.22") {
+		buf, err := json.Marshal(convertArgsToSlice(a.fields))
+		return string(buf), err
+	}
+
+	return ToJSON(a)
+}
+
+// FromJSON decodes a JSON encoded string into Args
+func FromJSON(p string) (Args, error) {
+	args := NewArgs()
+
+	if p == "" {
+		return args, nil
+	}
+
+	raw := []byte(p)
+	err := json.Unmarshal(raw, &args)
+	if err == nil {
+		return args, nil
+	}
+
+	// Fallback to parsing arguments in the legacy slice format
+	deprecated := map[string][]string{}
+	if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
+		return args, err
+	}
+
+	args.fields = deprecatedArgs(deprecated)
+	return args, nil
+}
+
+// UnmarshalJSON populates the Args from JSON encoded bytes
+func (args Args) UnmarshalJSON(raw []byte) error {
+	return json.Unmarshal(raw, &args.fields)
+}
+
+// Get returns the list of values associated with the key
+func (args Args) Get(key string) []string {
+	values := args.fields[key]
+	if values == nil {
+		return make([]string, 0)
+	}
+	slice := make([]string, 0, len(values))
+	for key := range values {
+		slice = append(slice, key)
+	}
+	return slice
+}
+
+// Add a new value to the set of values
+func (args Args) Add(key, value string) {
+	if _, ok := args.fields[key]; ok {
+		args.fields[key][value] = true
+	} else {
+		args.fields[key] = map[string]bool{value: true}
+	}
+}
+
+// Del removes a value from the set
+func (args Args) Del(key, value string) {
+	if _, ok := args.fields[key]; ok {
+		delete(args.fields[key], value)
+		if len(args.fields[key]) == 0 {
+			delete(args.fields, key)
+		}
+	}
+}
+
+// Len returns the number of keys in the mapping
+func (args Args) Len() int {
+	return len(args.fields)
+}
+
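Reviewer note (not part of the vendored files): a runnable sketch of the Args API defined in this file; the filter keys and values are illustrative.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(filters.Arg("label", "env=prod"))
	args.Add("name", "web")

	fmt.Println(args.Len())       // 2 keys
	fmt.Println(args.Get("name")) // [web]

	s, err := filters.ToJSON(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // {"label":{"env=prod":true},"name":{"web":true}}

	back, err := filters.FromJSON(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Get("label")) // [env=prod]
}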
+// MatchKVList returns true if all the pairs in sources exist as key=value
+// pairs in the mapping at key, or if there are no values at key.
+func (args Args) MatchKVList(key string, sources map[string]string) bool {
+	fieldValues := args.fields[key]
+
+	// do not filter if there is no filter set or cannot determine filter
+	if len(fieldValues) == 0 {
+		return true
+	}
+
+	if len(sources) == 0 {
+		return false
+	}
+
+	for value := range fieldValues {
+		testKV := strings.SplitN(value, "=", 2)
+
+		v, ok := sources[testKV[0]]
+		if !ok {
+			return false
+		}
+		if len(testKV) == 2 && testKV[1] != v {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Match returns true if any of the values at key match the source string
+func (args Args) Match(field, source string) bool {
+	if args.ExactMatch(field, source) {
+		return true
+	}
+
+	fieldValues := args.fields[field]
+	for name2match := range fieldValues {
+		match, err := regexp.MatchString(name2match, source)
+		if err != nil {
+			continue
+		}
+		if match {
+			return true
+		}
+	}
+	return false
+}
+
+// ExactMatch returns true if the source matches exactly one of the values.
+func (args Args) ExactMatch(key, source string) bool {
+	fieldValues, ok := args.fields[key]
+	// do not filter if there is no filter set or cannot determine filter
+	if !ok || len(fieldValues) == 0 {
+		return true
+	}
+
+	// try to match full name value to avoid O(N) regular expression matching
+	return fieldValues[source]
+}
+
+// UniqueExactMatch returns true if there is only one value and the source
+// matches exactly the value.
+func (args Args) UniqueExactMatch(key, source string) bool {
+	fieldValues := args.fields[key]
+	// do not filter if there is no filter set or cannot determine filter
+	if len(fieldValues) == 0 {
+		return true
+	}
+	if len(args.fields[key]) != 1 {
+		return false
+	}
+
+	// try to match full name value to avoid O(N) regular expression matching
+	return fieldValues[source]
+}
+
+// FuzzyMatch returns true if the source matches exactly one value, or the
+// source has one of the values as a prefix.
+func (args Args) FuzzyMatch(key, source string) bool {
+	if args.ExactMatch(key, source) {
+		return true
+	}
+
+	fieldValues := args.fields[key]
+	for prefix := range fieldValues {
+		if strings.HasPrefix(source, prefix) {
+			return true
+		}
+	}
+	return false
+}
+
+// Contains returns true if the key exists in the mapping
+func (args Args) Contains(field string) bool {
+	_, ok := args.fields[field]
+	return ok
+}
+
+type invalidFilter string
+
+func (e invalidFilter) Error() string {
+	return "Invalid filter '" + string(e) + "'"
+}
+
+func (invalidFilter) InvalidParameter() {}
+
+// Validate compares the set of accepted keys against the keys in the mapping.
+// An error is returned if any mapping keys are not in the accepted set.
+func (args Args) Validate(accepted map[string]bool) error {
+	for name := range args.fields {
+		if !accepted[name] {
+			return invalidFilter(name)
+		}
+	}
+	return nil
+}
+
+// WalkValues iterates over the list of values for a key in the mapping and calls
+// op() for each value. If op returns an error the iteration stops and the
+// error is returned.
+func (args Args) WalkValues(field string, op func(value string) error) error {
+	if _, ok := args.fields[field]; !ok {
+		return nil
+	}
+	for v := range args.fields[field] {
+		if err := op(v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Clone returns a copy of args.
+func (args Args) Clone() (newArgs Args) { + newArgs.fields = make(map[string]map[string]bool, len(args.fields)) + for k, m := range args.fields { + var mm map[string]bool + if m != nil { + mm = make(map[string]bool, len(m)) + for kk, v := range m { + mm[kk] = v + } + } + newArgs.fields[k] = mm + } + return newArgs +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go new file mode 100644 index 00000000000..4d9bf1c62c8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/graph_driver_data.go @@ -0,0 +1,17 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// GraphDriverData Information about a container's graph driver. +// swagger:model GraphDriverData +type GraphDriverData struct { + + // data + // Required: true + Data map[string]string `json:"Data"` + + // name + // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go new file mode 100644 index 00000000000..7592d2f8b15 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go new file mode 100644 index 00000000000..e302bb0aebb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -0,0 +1,36 @@ +package image // import "github.com/docker/docker/api/types/image" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. 
+// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// HistoryResponseItem individual image layer information in response to ImageHistory operation +// swagger:model HistoryResponseItem +type HistoryResponseItem struct { + + // comment + // Required: true + Comment string `json:"Comment"` + + // created + // Required: true + Created int64 `json:"Created"` + + // created by + // Required: true + CreatedBy string `json:"CreatedBy"` + + // Id + // Required: true + ID string `json:"Id"` + + // size + // Required: true + Size int64 `json:"Size"` + + // tags + // Required: true + Tags []string `json:"Tags"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go new file mode 100644 index 00000000000..b9a65a0d8e8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go @@ -0,0 +1,15 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageDeleteResponseItem image delete response item +// swagger:model ImageDeleteResponseItem +type ImageDeleteResponseItem struct { + + // The image ID of an image that was deleted + Deleted string `json:"Deleted,omitempty"` + + // The image ID of an image that was untagged + Untagged string `json:"Untagged,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go new file mode 100644 index 00000000000..e145b3dcfcd --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_summary.go @@ -0,0 +1,49 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // containers + // Required: true + Containers int64 `json:"Containers"` + + // created + // Required: true + Created int64 `json:"Created"` + + // Id + // Required: true + ID string `json:"Id"` + + // labels + // Required: true + Labels map[string]string `json:"Labels"` + + // parent Id + // Required: true + ParentID string `json:"ParentId"` + + // repo digests + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // repo tags + // Required: true + RepoTags []string `json:"RepoTags"` + + // shared size + // Required: true + SharedSize int64 `json:"SharedSize"` + + // size + // Required: true + Size int64 `json:"Size"` + + // virtual size + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go new file mode 100644 index 00000000000..443b8d07a9f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -0,0 +1,131 @@ +package mount // import "github.com/docker/docker/api/types/mount" + +import ( + "os" +) + +// Type represents the type of a mount. 
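+//
+// A reviewer sketch of a bind mount built from these types (paths are
+// illustrative, not upstream documentation):
+//
+//	m := Mount{
+//		Type:   TypeBind,
+//		Source: "/var/lib/data", // host path
+//		Target: "/data",         // path inside the container
+//	}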
+type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" + // TypeNamedPipe is the type for mounting Windows named pipes + TypeNamedPipe Type = "npipe" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Consistency Consistency `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// Consistency represents the consistency requirements of a mount. +type Consistency string + +const ( + // ConsistencyFull guarantees bind mount-like consistency + ConsistencyFull Consistency = "consistent" + // ConsistencyCached mounts can cache read data and FS structure + ConsistencyCached Consistency = "cached" + // ConsistencyDelegated mounts can cache read and written data and structure + ConsistencyDelegated Consistency = "delegated" + // ConsistencyDefault provides "consistent" behavior unless overridden + ConsistencyDefault Consistency = "default" +) + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. 
Only the most basic are added for now. + // + // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. +} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go new file mode 100644 index 00000000000..437b184c67b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -0,0 +1,126 @@ +package network // import "github.com/docker/docker/api/types/network" +import ( + "github.com/docker/docker/api/types/filters" +) + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string // Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) + return &cfgCopy +} + +// PeerInfo represents one peer of an overlay network +type PeerInfo struct { + Name string + IP string +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string + DriverOpts map[string]string +} + +// Task carries the information about one backend task +type Task struct { + Name string + EndpointID string + EndpointIP string + Info map[string]string +} + +// ServiceInfo represents service parameters with the list of service's tasks +type ServiceInfo struct { + VIP string + Ports []string + LocalLBIndex int + Tasks []Task +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) + } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) 
+ } + return &epCopy +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} + +var acceptedFilters = map[string]bool{ + "dangling": true, + "driver": true, + "id": true, + "label": true, + "name": true, + "scope": true, + "type": true, +} + +// ValidateFilters validates the list of filter args with the available filters. +func ValidateFilters(filter filters.Args) error { + return filter.Validate(acceptedFilters) +} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go new file mode 100644 index 00000000000..abae48b9ab0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -0,0 +1,203 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True if the plugin is running. False if the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. 
+// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // Docker Version used to create the plugin + DockerVersion string `json:"DockerVersion,omitempty"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // ipc host + // Required: true + IpcHost bool `json:"IpcHost"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // pid host + // Required: true + PidHost bool `json:"PidHost"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // Protocol to use for clients connecting to the plugin. + ProtocolScheme string `json:"ProtocolScheme,omitempty"` + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. 
+// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go new file mode 100644 index 00000000000..56990106755 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go new file mode 100644 index 00000000000..32962dc2ebe --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go new file mode 100644 index 00000000000..c82f204e870 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go new file mode 100644 index 00000000000..5c031cf8b5c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go new file mode 100644 index 00000000000..60d1fb5ad85 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -0,0 +1,71 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "encoding/json" + "fmt" + "sort" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege + +func (s PluginPrivileges) Len() int { + return len(s) +} + +func (s PluginPrivileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s PluginPrivileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go new file mode 100644 index 00000000000..d91234744c6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // Host IP address that the container's port is mapped to + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go new file mode 100644 index 00000000000..f0a2113e405 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go new file mode 100644 index 00000000000..62a88f5be89 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -0,0 +1,120 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +import ( + "encoding/json" + "net" + + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. 
+type ServiceConfig struct { + AllowNondistributableArtifactsCIDRs []*NetIPNet + AllowNondistributableArtifactsHostnames []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. 
+	IsOfficial bool `json:"is_official"`
+	// Name is the name of the repository
+	Name string `json:"name"`
+	// IsAutomated indicates whether the result is automated
+	IsAutomated bool `json:"is_automated"`
+	// Description is a textual description of the repository
+	Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+	// Query contains the query string that generated the search results
+	Query string `json:"query"`
+	// NumResults indicates the number of results the query returned
+	NumResults int `json:"num_results"`
+	// Results is a slice containing the actual results for the search
+	Results []SearchResult `json:"results"`
+}
+
+// DistributionInspect describes the result obtained from contacting the
+// registry to retrieve image metadata
+type DistributionInspect struct {
+	// Descriptor contains information about the manifest, including
+	// the content addressable digest
+	Descriptor v1.Descriptor
+	// Platforms contains the list of platforms supported by the image,
+	// obtained by parsing the manifest
+	Platforms []v1.Platform
+}
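Reviewer note (not part of the vendored files): a quick round-trip showing the custom JSON behaviour of NetIPNet defined above; the CIDR is illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	// NetIPNet round-trips as its CIDR string rather than as a struct.
	var n registry.NetIPNet
	if err := json.Unmarshal([]byte(`"10.0.0.0/8"`), &n); err != nil {
		panic(err)
	}
	out, err := json.Marshal(&n)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "10.0.0.0/8"
}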
diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go
new file mode 100644
index 00000000000..74ea64b1bb6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/service_update_response.go
@@ -0,0 +1,12 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ServiceUpdateResponse service update response
+// swagger:model ServiceUpdateResponse
+type ServiceUpdateResponse struct {
+
+	// Optional warning messages
+	Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go
new file mode 100644
index 00000000000..20daebed14b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/stats.go
@@ -0,0 +1,181 @@
+// Package types is used for API stability in the types and response to the
+// consumers of the API stats endpoint.
+package types // import "github.com/docker/docker/api/types"
+
+import "time"
+
+// ThrottlingData stores CPU throttling stats of one running container.
+// Not used on Windows.
+type ThrottlingData struct {
+	// Number of periods with throttling active
+	Periods uint64 `json:"periods"`
+	// Number of periods when the container hits its throttling limit.
+	ThrottledPeriods uint64 `json:"throttled_periods"`
+	// Aggregate time the container was throttled for in nanoseconds.
+	ThrottledTime uint64 `json:"throttled_time"`
+}
+
+// CPUUsage stores all CPU stats aggregated since container inception.
+type CPUUsage struct {
+	// Total CPU time consumed.
+	// Units: nanoseconds (Linux)
+	// Units: 100's of nanoseconds (Windows)
+	TotalUsage uint64 `json:"total_usage"`
+
+	// Total CPU time consumed per core (Linux). Not used on Windows.
+	// Units: nanoseconds.
+	PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+
+	// Time spent by tasks of the cgroup in kernel mode (Linux).
+	// Time spent by all container processes in kernel mode (Windows).
+	// Units: nanoseconds (Linux).
+	// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
+	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+
+	// Time spent by tasks of the cgroup in user mode (Linux).
+	// Time spent by all container processes in user mode (Windows).
+	// Units: nanoseconds (Linux).
+	// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
+	UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+// CPUStats aggregates and wraps all CPU-related info of a container.
+type CPUStats struct {
+	// CPU Usage. Linux and Windows.
+	CPUUsage CPUUsage `json:"cpu_usage"`
+
+	// System Usage. Linux only.
+	SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
+
+	// Online CPUs. Linux only.
+	OnlineCPUs uint32 `json:"online_cpus,omitempty"`
+
+	// Throttling Data. Linux only.
+	ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+// MemoryStats aggregates all memory stats since container inception on Linux.
+// Windows returns stats for commit and private working set only.
+type MemoryStats struct {
+	// Linux Memory Stats
+
+	// current res_counter usage for memory
+	Usage uint64 `json:"usage,omitempty"`
+	// maximum usage ever recorded.
+	MaxUsage uint64 `json:"max_usage,omitempty"`
+	// TODO(vishh): Export these as stronger types.
+	// all the stats exported via memory.stat.
+	Stats map[string]uint64 `json:"stats,omitempty"`
+	// number of times memory usage hits limits.
+	Failcnt uint64 `json:"failcnt,omitempty"`
+	Limit   uint64 `json:"limit,omitempty"`
+
+	// Windows Memory Stats
+	// See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
+
+	// committed bytes
+	Commit uint64 `json:"commitbytes,omitempty"`
+	// peak committed bytes
+	CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
+	// private working set
+	PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
+}
+
+// BlkioStatEntry is one small entity to store a piece of Blkio stats
+// Not used on Windows.
+type BlkioStatEntry struct {
+	Major uint64 `json:"major"`
+	Minor uint64 `json:"minor"`
+	Op    string `json:"op"`
+	Value uint64 `json:"value"`
+}
+
+// BlkioStats stores all IO service stats for data read and write.
+// This is a Linux-specific structure, as the differences between expressing
+// block I/O on Windows and Linux are significant enough that a combined
+// structure would make little sense.
+type BlkioStats struct {
+	// number of bytes transferred to and from the block device
+	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+	IoServicedRecursive     []BlkioStatEntry `json:"io_serviced_recursive"`
+	IoQueuedRecursive       []BlkioStatEntry `json:"io_queue_recursive"`
+	IoServiceTimeRecursive  []BlkioStatEntry `json:"io_service_time_recursive"`
+	IoWaitTimeRecursive     []BlkioStatEntry `json:"io_wait_time_recursive"`
+	IoMergedRecursive       []BlkioStatEntry `json:"io_merged_recursive"`
+	IoTimeRecursive         []BlkioStatEntry `json:"io_time_recursive"`
+	SectorsRecursive        []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+// StorageStats is the disk I/O stats for read/write on Windows.
+type StorageStats struct {
+	ReadCountNormalized  uint64 `json:"read_count_normalized,omitempty"`
+	ReadSizeBytes        uint64 `json:"read_size_bytes,omitempty"`
+	WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
+	WriteSizeBytes       uint64 `json:"write_size_bytes,omitempty"`
+}
+
+// NetworkStats aggregates the network stats of one container
+type NetworkStats struct {
+	// Bytes received. Windows and Linux.
+	RxBytes uint64 `json:"rx_bytes"`
+	// Packets received. Windows and Linux.
+	RxPackets uint64 `json:"rx_packets"`
+	// Received errors. Not used on Windows. Note that we don't `omitempty` this
+	// field as it is expected in the >=v1.21 API stats structure.
+	RxErrors uint64 `json:"rx_errors"`
+	// Incoming packets dropped. Windows and Linux.
+	RxDropped uint64 `json:"rx_dropped"`
+	// Bytes sent. Windows and Linux.
+	TxBytes uint64 `json:"tx_bytes"`
+	// Packets sent. Windows and Linux.
+	TxPackets uint64 `json:"tx_packets"`
+	// Sent errors. Not used on Windows. Note that we don't `omitempty` this
+	// field as it is expected in the >=v1.21 API stats structure.
+	TxErrors uint64 `json:"tx_errors"`
+	// Outgoing packets dropped. Windows and Linux.
+	TxDropped uint64 `json:"tx_dropped"`
+	// Endpoint ID. Not used on Linux.
+	EndpointID string `json:"endpoint_id,omitempty"`
+	// Instance ID. Not used on Linux.
+	InstanceID string `json:"instance_id,omitempty"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+	// Current is the number of pids in the cgroup
+	Current uint64 `json:"current,omitempty"`
+	// Limit is the hard limit on the number of pids in the cgroup.
+	// A "Limit" of 0 means that there is no limit.
+	Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats is the top-level struct aggregating all types of stats of one container.
+type Stats struct {
+	// Common stats
+	Read    time.Time `json:"read"`
+	PreRead time.Time `json:"preread"`
+
+	// Linux specific stats, not populated on Windows.
+	PidsStats  PidsStats  `json:"pids_stats,omitempty"`
+	BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+
+	// Windows specific stats, not populated on Linux.
+	NumProcs     uint32       `json:"num_procs"`
+	StorageStats StorageStats `json:"storage_stats,omitempty"`
+
+	// Shared stats
+	CPUStats    CPUStats    `json:"cpu_stats,omitempty"`
+	PreCPUStats CPUStats    `json:"precpu_stats,omitempty"` // "Pre"="Previous"
+	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+}
+
+// StatsJSON extends Stats with the container's name and ID, plus per-network
+// stats (available with API version >=1.21).
+type StatsJSON struct {
+	Stats
+
+	Name string `json:"name,omitempty"`
+	ID   string `json:"id,omitempty"`
+
+	// Networks request version >=1.21
+	Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
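Reviewer note (not part of the vendored files): the fields above are what CPU-percentage math is commonly built on; the formula below is a sketch under that assumption, not something this diff defines. All values in main are synthetic.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

// cpuPercent mirrors the usual Linux calculation: the delta of container CPU
// usage over the delta of system usage, scaled by the online CPU count.
func cpuPercent(s *types.StatsJSON) float64 {
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
	sysDelta := float64(s.CPUStats.SystemUsage - s.PreCPUStats.SystemUsage)
	if cpuDelta <= 0 || sysDelta <= 0 {
		return 0
	}
	return cpuDelta / sysDelta * float64(s.CPUStats.OnlineCPUs) * 100
}

func main() {
	s := &types.StatsJSON{}
	s.PreCPUStats.CPUUsage.TotalUsage = 1000000
	s.PreCPUStats.SystemUsage = 10000000
	s.CPUStats.CPUUsage.TotalUsage = 2000000
	s.CPUStats.SystemUsage = 20000000
	s.CPUStats.OnlineCPUs = 4
	fmt.Printf("%.1f%%\n", cpuPercent(s)) // 40.0%
}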
diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
new file mode 100644
index 00000000000..82921cebc15
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice // import "github.com/docker/docker/api/types/strslice"
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+	if len(b) == 0 {
+		// With no input, we preserve the existing value by returning nil and
+		// leaving the target alone. This allows defining default values for
+		// the type.
+		return nil
+	}
+
+	p := make([]string, 0, 1)
+	if err := json.Unmarshal(b, &p); err != nil {
+		var s string
+		if err := json.Unmarshal(b, &s); err != nil {
+			return err
+		}
+		p = append(p, s)
+	}
+
+	*e = p
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go
new file mode 100644
index 00000000000..ef020f458bd
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/common.go
@@ -0,0 +1,40 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import "time"
+
+// Version represents the internal object version.
+type Version struct {
+	Index uint64 `json:",omitempty"`
+}
+
+// Meta is a base object inherited by most of the other ones.
+type Meta struct {
+	Version   Version   `json:",omitempty"`
+	CreatedAt time.Time `json:",omitempty"`
+	UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+	Name   string            `json:",omitempty"`
+	Labels map[string]string `json:"Labels"`
+}
+
+// Driver represents a driver (network, logging, secrets backend).
+type Driver struct {
+	Name    string            `json:",omitempty"`
+	Options map[string]string `json:",omitempty"`
+}
+
+// TLSInfo represents the TLS information about what CA certificate is trusted,
+// and who the issuer for a TLS certificate is
+type TLSInfo struct {
+	// TrustRoot is the trusted CA root certificate in PEM format
+	TrustRoot string `json:",omitempty"`
+
+	// CertIssuer is the raw subject bytes of the issuer
+	CertIssuerSubject []byte `json:",omitempty"`
+
+	// CertIssuerPublicKey is the raw public key bytes of the issuer
+	CertIssuerPublicKey []byte `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go
new file mode 100644
index 00000000000..16202ccce61
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/config.go
@@ -0,0 +1,40 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import "os"
+
+// Config represents a config.
+type Config struct {
+	ID string
+	Meta
+	Spec ConfigSpec
+}
+
+// ConfigSpec represents a config specification from a config in swarm
+type ConfigSpec struct {
+	Annotations
+	Data []byte `json:",omitempty"`
+
+	// Templating controls whether and how to evaluate the config payload as
+	// a template. If it is not set, no templating is used.
+	Templating *Driver `json:",omitempty"`
+}
+
+// ConfigReferenceFileTarget is a file target in a config reference
+type ConfigReferenceFileTarget struct {
+	Name string
+	UID  string
+	GID  string
+	Mode os.FileMode
+}
+
+// ConfigReferenceRuntimeTarget is a target for a config specifying that it
+// isn't mounted into the container but instead has some other purpose.
+type ConfigReferenceRuntimeTarget struct{}
+
+// ConfigReference is a reference to a config in swarm
+type ConfigReference struct {
+	File       *ConfigReferenceFileTarget    `json:",omitempty"`
+	Runtime    *ConfigReferenceRuntimeTarget `json:",omitempty"`
+	ConfigID   string
+	ConfigName string
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go
new file mode 100644
index 00000000000..af5e1c0bc27
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/container.go
@@ -0,0 +1,80 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import (
+	"time"
+
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/go-units"
+)
+
+// DNSConfig specifies DNS-related configuration in the resolver configuration
+// file (resolv.conf).
+// Detailed documentation is available in:
+// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+// `nameserver`, `search`, and `options` are supported.
+// TODO: `domain` is not supported yet.
+type DNSConfig struct {
+	// Nameservers specifies the IP addresses of the name servers
+	Nameservers []string `json:",omitempty"`
+	// Search specifies the search list for host-name lookup
+	Search []string `json:",omitempty"`
+	// Options allows certain internal resolver variables to be modified
+	Options []string `json:",omitempty"`
+}
+
+// SELinuxContext contains the SELinux labels of the container.
+type SELinuxContext struct {
+	Disable bool
+
+	User  string
+	Role  string
+	Type  string
+	Level string
+}
+
+// CredentialSpec for managed service account (Windows only)
+type CredentialSpec struct {
+	Config   string
+	File     string
+	Registry string
+}
+
+// Privileges defines the security options for the container.
+type Privileges struct {
+	CredentialSpec *CredentialSpec
+	SELinuxContext *SELinuxContext
+}
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+	Image           string                  `json:",omitempty"`
+	Labels          map[string]string       `json:",omitempty"`
+	Command         []string                `json:",omitempty"`
+	Args            []string                `json:",omitempty"`
+	Hostname        string                  `json:",omitempty"`
+	Env             []string                `json:",omitempty"`
+	Dir             string                  `json:",omitempty"`
+	User            string                  `json:",omitempty"`
+	Groups          []string                `json:",omitempty"`
+	Privileges      *Privileges             `json:",omitempty"`
+	Init            *bool                   `json:",omitempty"`
+	StopSignal      string                  `json:",omitempty"`
+	TTY             bool                    `json:",omitempty"`
+	OpenStdin       bool                    `json:",omitempty"`
+	ReadOnly        bool                    `json:",omitempty"`
+	Mounts          []mount.Mount           `json:",omitempty"`
+	StopGracePeriod *time.Duration          `json:",omitempty"`
+	Healthcheck     *container.HealthConfig `json:",omitempty"`
+	// The format of extra hosts on swarmkit is specified in:
+	// http://man7.org/linux/man-pages/man5/hosts.5.html
+	// IP_address canonical_hostname [aliases...]
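+	// For example (an illustrative entry): "10.0.0.1 app.internal.example app"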
+ Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` + Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` + CapabilityAdd []string `json:",omitempty"` + CapabilityDrop []string `json:",omitempty"` + Ulimits []*units.Ulimit `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go new file mode 100644 index 00000000000..98ef3284d1d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -0,0 +1,121 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "github.com/docker/docker/api/types/network" +) + +// Endpoint represents an endpoint. +type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" + // PortConfigProtocolSCTP SCTP + PortConfigProtocolSCTP PortConfigProtocol = "sctp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. 
+type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. +type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go new file mode 100644 index 00000000000..1e30f5fa10d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -0,0 +1,115 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. +type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. 
+type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. +type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go new file mode 100644 index 00000000000..0c77403ccff --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -0,0 +1,27 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + // RuntimeNetworkAttachment is the network attachment runtime + RuntimeNetworkAttachment RuntimeType = "attachment" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) + +// NetworkAttachmentSpec represents the runtime spec type for network +// attachment tasks +type NetworkAttachmentSpec struct { + ContainerID string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go new file mode 100644 index 00000000000..98c2806c31d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto + +package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 00000000000..e45045866a6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,754 @@ +// Code generated by protoc-gen-gogo. 
DO NOT EDIT. +// source: plugin.proto + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +func (m *PluginSpec) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. 
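+// For example (illustrative): a privilege named "network" with
+// Value ["host"] asks the user to grant access to the host network.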
+type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Env) > 0 { + for _, s := range m.Env { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + 
sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func (m *PluginPrivilege) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, + 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, + 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, + 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, + 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, + 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, + 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, + 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, + 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, + 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 
0xcc, 0xb3, 0x28, 0x8f, 0xd9, + 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, + 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, + 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, + 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, + 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, + 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto new file mode 100644 index 00000000000..9ef169046b4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; + repeated string env = 5; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go new file mode 100644 index 00000000000..d5213ec981c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -0,0 +1,36 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "os" + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` + Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go new file mode 100644 index 00000000000..6eb452d24d1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -0,0 +1,202 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "time" + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus *UpdateStatus `json:",omitempty"` + + // ServiceStatus is an optional, extra field indicating the number of + // desired and running tasks. 
It is provided primarily as a shortcut to
+	// calculating these values client-side, which otherwise would require
+	// listing all tasks for a service, an operation that could be
+	// computationally and network expensive.
+	ServiceStatus *ServiceStatus `json:",omitempty"`
+
+	// JobStatus is the status of a Service which is in one of ReplicatedJob or
+	// GlobalJob modes. It is absent on Replicated and Global services.
+	JobStatus *JobStatus `json:",omitempty"`
+}
+
+// ServiceSpec represents the spec of a service.
+type ServiceSpec struct {
+	Annotations
+
+	// TaskTemplate defines how the service should construct new tasks when
+	// orchestrating this service.
+	TaskTemplate   TaskSpec      `json:",omitempty"`
+	Mode           ServiceMode   `json:",omitempty"`
+	UpdateConfig   *UpdateConfig `json:",omitempty"`
+	RollbackConfig *UpdateConfig `json:",omitempty"`
+
+	// Networks field in ServiceSpec is deprecated. The
+	// same field in TaskSpec should be used instead.
+	// This field will be removed in a future release.
+	Networks     []NetworkAttachmentConfig `json:",omitempty"`
+	EndpointSpec *EndpointSpec             `json:",omitempty"`
+}
+
+// ServiceMode represents the mode of a service.
+type ServiceMode struct {
+	Replicated    *ReplicatedService `json:",omitempty"`
+	Global        *GlobalService     `json:",omitempty"`
+	ReplicatedJob *ReplicatedJob     `json:",omitempty"`
+	GlobalJob     *GlobalJob         `json:",omitempty"`
+}
+
+// UpdateState is the state of a service update.
+type UpdateState string
+
+const (
+	// UpdateStateUpdating is the updating state.
+	UpdateStateUpdating UpdateState = "updating"
+	// UpdateStatePaused is the paused state.
+	UpdateStatePaused UpdateState = "paused"
+	// UpdateStateCompleted is the completed state.
+	UpdateStateCompleted UpdateState = "completed"
+	// UpdateStateRollbackStarted is the state with a rollback in progress.
+	UpdateStateRollbackStarted UpdateState = "rollback_started"
+	// UpdateStateRollbackPaused is the state with a rollback paused.
+	UpdateStateRollbackPaused UpdateState = "rollback_paused"
+	// UpdateStateRollbackCompleted is the state with a completed rollback.
+	UpdateStateRollbackCompleted UpdateState = "rollback_completed"
+)
+
+// UpdateStatus reports the status of a service update.
+type UpdateStatus struct {
+	State       UpdateState `json:",omitempty"`
+	StartedAt   *time.Time  `json:",omitempty"`
+	CompletedAt *time.Time  `json:",omitempty"`
+	Message     string      `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+	Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+// ReplicatedJob is a type of Service which executes a defined number of Tasks
+// in parallel until the specified number of Tasks have succeeded.
+type ReplicatedJob struct {
+	// MaxConcurrent indicates the maximum number of Tasks that should be
+	// executing simultaneously for this job at any given time. There may be
+	// fewer Tasks than MaxConcurrent executing simultaneously; for example, if
+	// there are fewer than MaxConcurrent tasks needed to reach
+	// TotalCompletions.
+	//
+	// If this field is empty, it will default to a max concurrency of 1.
+	MaxConcurrent *uint64 `json:",omitempty"`
+
+	// TotalCompletions is the total number of Tasks desired to run to
+	// completion.
+	//
+	// If this field is empty, the value of MaxConcurrent will be used.
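+	//
+	// For example (illustrative): MaxConcurrent=3 with TotalCompletions=10
+	// runs at most three Tasks at a time until ten Tasks have completed.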
+	TotalCompletions *uint64 `json:",omitempty"`
+}
+
+// GlobalJob is the type of a Service which executes a Task on every Node
+// matching the Service's placement constraints. These tasks run to completion
+// and then exit.
+//
+// This type is deliberately empty.
+type GlobalJob struct{}
+
+const (
+	// UpdateFailureActionPause PAUSE
+	UpdateFailureActionPause = "pause"
+	// UpdateFailureActionContinue CONTINUE
+	UpdateFailureActionContinue = "continue"
+	// UpdateFailureActionRollback ROLLBACK
+	UpdateFailureActionRollback = "rollback"
+
+	// UpdateOrderStopFirst STOP_FIRST
+	UpdateOrderStopFirst = "stop-first"
+	// UpdateOrderStartFirst START_FIRST
+	UpdateOrderStartFirst = "start-first"
+)
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+	// Maximum number of tasks to be updated in one iteration.
+	// 0 means unlimited parallelism.
+	Parallelism uint64
+
+	// Amount of time between updates.
+	Delay time.Duration `json:",omitempty"`
+
+	// FailureAction is the action to take when an update fails.
+	FailureAction string `json:",omitempty"`
+
+	// Monitor indicates how long to monitor a task for failure after it is
+	// created. If the task fails by ending up in one of the states
+	// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+	// this counts as a failure. If it fails after Monitor, it does not
+	// count as a failure. If Monitor is unspecified, a default value will
+	// be used.
+	Monitor time.Duration `json:",omitempty"`
+
+	// MaxFailureRatio is the fraction of tasks that may fail during
+	// an update before the failure action is invoked. Any task created by
+	// the current update which ends up in one of the states REJECTED,
+	// COMPLETED or FAILED within Monitor from its creation counts as a
+	// failure. The number of failures is divided by the number of tasks
+	// being updated, and if this fraction is greater than
+	// MaxFailureRatio, the failure action is invoked.
+	//
+	// If the failure action is CONTINUE, there is no effect.
+	// If the failure action is PAUSE, no more tasks will be updated until
+	// another update is started.
+	MaxFailureRatio float32
+
+	// Order indicates the order of operations when rolling out an updated
+	// task. Either the old task is shut down before the new task is
+	// started, or the new task is started before the old task is shut down.
+	Order string
+}
+
+// ServiceStatus represents the number of running tasks in a service and the
+// number of tasks desired to be running.
+type ServiceStatus struct {
+	// RunningTasks is the number of tasks for the service actually in the
+	// Running state
+	RunningTasks uint64
+
+	// DesiredTasks is the number of tasks desired to be running by the
+	// service. For replicated services, this is the replica count. For global
+	// services, this is computed by taking the number of tasks with desired
+	// state of not-Shutdown.
+	DesiredTasks uint64
+
+	// CompletedTasks is the number of tasks in the state Completed, if this
+	// service is in ReplicatedJob or GlobalJob mode. This field must be
+	// cross-referenced with the service type, because the default value of 0
+	// may mean that a service is not in a job mode, or it may mean that the
+	// job has yet to complete any tasks.
+	CompletedTasks uint64
+}
+
+// JobStatus is the status of a job-type service.
+type JobStatus struct {
+	// JobIteration is a value increased each time a Job is executed,
+	// successfully or otherwise.
"Executed", in this case, means the job as a + // whole has been started, not that an individual Task has been launched. A + // job is "Executed" when its ServiceSpec is updated. JobIteration can be + // used to disambiguate Tasks belonging to different executions of a job. + // + // Though JobIteration will increase with each subsequent execution, it may + // not necessarily increase by 1, and so JobIteration should not be used to + // keep track of the number of times a job has been executed. + JobIteration Version + + // LastExecution is the time that the job was last executed, as observed by + // Swarm manager. + LastExecution time.Time `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go new file mode 100644 index 00000000000..b25f9996462 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -0,0 +1,227 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" +) + +// ClusterInfo represents info about the cluster for outputting in "info" +// it contains the same information as "Swarm", but without the JoinTokens +type ClusterInfo struct { + ID string + Meta + Spec Spec + TLSInfo TLSInfo + RootRotationInProgress bool + DefaultAddrPool []string + SubnetSize uint32 + DataPathPort uint32 +} + +// Swarm represents a swarm. +type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string + // Manager is the join token managers may use to join the swarm. + Manager string +} + +// Spec represents the spec of a swarm. +type Spec struct { + Annotations + + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` +} + +// OrchestrationConfig represents orchestration configuration. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit *int64 `json:",omitempty"` +} + +// TaskDefaults parameterizes cluster-level task creation with default values. +type TaskDefaults struct { + // LogDriver selects the log driver to use for tasks created in the + // orchestrator if unspecified by a service. + // + // Updating this value will only have an affect on new tasks. Old tasks + // will continue use their previously configured log driver until + // recreated. + LogDriver *Driver `json:",omitempty"` +} + +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + +// RaftConfig represents raft configuration. +type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. + SnapshotInterval uint64 `json:",omitempty"` + + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. 
+	KeepOldSnapshots *uint64 `json:",omitempty"`
+
+	// LogEntriesForSlowFollowers is the number of log entries to keep
+	// around to sync up slow followers after a snapshot is created.
+	LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+
+	// ElectionTick is the number of ticks that a follower will wait for a message
+	// from the leader before becoming a candidate and starting an election.
+	// ElectionTick must be greater than HeartbeatTick.
+	//
+	// A tick currently defaults to one second, so these translate directly to
+	// seconds currently, but this is NOT guaranteed.
+	ElectionTick int
+
+	// HeartbeatTick is the number of ticks between heartbeats. Every
+	// HeartbeatTick ticks, the leader will send a heartbeat to the
+	// followers.
+	//
+	// A tick currently defaults to one second, so these translate directly to
+	// seconds currently, but this is NOT guaranteed.
+	HeartbeatTick int
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+	// HeartbeatPeriod defines how often the agent should send heartbeats to
+	// the dispatcher.
+	HeartbeatPeriod time.Duration `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+	// NodeCertExpiry is the duration certificates should be issued for
+	NodeCertExpiry time.Duration `json:",omitempty"`
+
+	// ExternalCAs is a list of CAs to which a manager node will make
+	// certificate signing requests for node certificates.
+	ExternalCAs []*ExternalCA `json:",omitempty"`
+
+	// SigningCACert and SigningCAKey specify the desired signing root CA and
+	// root CA key for the swarm. When inspecting the cluster, the key will
+	// be redacted.
+	SigningCACert string `json:",omitempty"`
+	SigningCAKey  string `json:",omitempty"`
+
+	// If this value changes, and there is no specified signing cert and key,
+	// then the swarm is forced to generate a new root certificate and key.
+	ForceRotate uint64 `json:",omitempty"`
+}
+
+// ExternalCAProtocol represents type of external CA.
+type ExternalCAProtocol string
+
+// ExternalCAProtocolCFSSL CFSSL
+const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
+
+// ExternalCA defines external CA to be used by the cluster.
+type ExternalCA struct {
+	// Protocol is the protocol used by this external CA.
+	Protocol ExternalCAProtocol
+
+	// URL is the URL where the external CA can be reached.
+	URL string
+
+	// Options is a set of additional key/value pairs whose interpretation
+	// depends on the specified CA type.
+	Options map[string]string `json:",omitempty"`
+
+	// CACert specifies which root CA is used by this external CA. This certificate must
+	// be in PEM format.
+	CACert string
+}
+
+// InitRequest is the request used to init a swarm.
+type InitRequest struct {
+	ListenAddr       string
+	AdvertiseAddr    string
+	DataPathAddr     string
+	DataPathPort     uint32
+	ForceNewCluster  bool
+	Spec             Spec
+	AutoLockManagers bool
+	Availability     NodeAvailability
+	DefaultAddrPool  []string
+	SubnetSize       uint32
+}
+
+// JoinRequest is the request used to join a swarm.
+type JoinRequest struct {
+	ListenAddr    string
+	AdvertiseAddr string
+	DataPathAddr  string
+	RemoteAddrs   []string
+	JoinToken     string // accept by secret
+	Availability  NodeAvailability
+}
+
+// UnlockRequest is the request used to unlock a swarm.
+type UnlockRequest struct {
+	// UnlockKey is the unlock key in ASCII-armored format.
+	UnlockKey string
+}
+
+// LocalNodeState represents the state of the local node.
+type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm. +type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int `json:",omitempty"` + Managers int `json:",omitempty"` + + Cluster *ClusterInfo `json:",omitempty"` + + Warnings []string `json:",omitempty"` +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UpdateFlags contains flags for SwarmUpdate. +type UpdateFlags struct { + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go new file mode 100644 index 00000000000..a6f7ab7b5c7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -0,0 +1,206 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" + + "github.com/docker/docker/api/types/swarm/runtime" +) + +// TaskState represents the state of a task. +type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" + // TaskStateRemove REMOVE + TaskStateRemove TaskState = "remove" + // TaskStateOrphaned ORPHANED + TaskStateOrphaned TaskState = "orphaned" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` + + // JobIteration is the JobIteration of the Service that this Task was + // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is + // used to determine which Tasks belong to which run of the job. This field + // is absent if the Service mode is Replicated or Global. + JobIteration *Version `json:",omitempty"` +} + +// TaskSpec represents the spec of a task. 
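+// Exactly one of the runtime spec fields below is expected to be set; for
+// example (illustrative), a plugin task sets PluginSpec together with
+// Runtime: "plugin".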
+type TaskSpec struct { + // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. + // PluginSpec is only used when the `Runtime` field is set to `plugin` + // NetworkAttachmentSpec is used if the `Runtime` field is set to + // `attachment`. + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` + + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` +} + +// Resources represents resources (CPU/Memory) which can be advertised by a +// node and requested to be reserved for a task. +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// Limit describes limits on resources which can be requested by a task. +type Limit struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + Pids int64 `json:",omitempty"` +} + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + NamedResourceSpec *NamedGenericResource `json:",omitempty"` + DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` +} + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `json:",omitempty"` + Value string `json:",omitempty"` +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `json:",omitempty"` + Value int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Limit `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` + Preferences []PlacementPreference `json:",omitempty"` + MaxReplicas uint64 `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + Platforms []Platform `json:",omitempty"` +} + +// PlacementPreference provides a way to make the scheduler aware of factors +// such as topology. 
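+// For example (illustrative): a SpreadOver preference with SpreadDescriptor
+// "engine.labels.az" balances tasks across availability zones.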
+type PlacementPreference struct {
+	Spread *SpreadOver
+}
+
+// SpreadOver is a scheduling preference that instructs the scheduler to spread
+// tasks evenly over groups of nodes identified by labels.
+type SpreadOver struct {
+	// label descriptor, such as engine.labels.az
+	SpreadDescriptor string
+}
+
+// RestartPolicy represents the restart policy.
+type RestartPolicy struct {
+	Condition   RestartPolicyCondition `json:",omitempty"`
+	Delay       *time.Duration         `json:",omitempty"`
+	MaxAttempts *uint64                `json:",omitempty"`
+	Window      *time.Duration         `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
+type RestartPolicyCondition string
+
+const (
+	// RestartPolicyConditionNone NONE
+	RestartPolicyConditionNone RestartPolicyCondition = "none"
+	// RestartPolicyConditionOnFailure ON_FAILURE
+	RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
+	// RestartPolicyConditionAny ANY
+	RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+	Timestamp       time.Time        `json:",omitempty"`
+	State           TaskState        `json:",omitempty"`
+	Message         string           `json:",omitempty"`
+	Err             string           `json:",omitempty"`
+	ContainerStatus *ContainerStatus `json:",omitempty"`
+	PortStatus      PortStatus       `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+	ContainerID string
+	PID         int
+	ExitCode    int
+}
+
+// PortStatus represents the port status of a task's host ports whose
+// service has published host ports
+type PortStatus struct {
+	Ports []PortConfig `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go
new file mode 100644
index 00000000000..84b6f073224
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/time/duration_convert.go
@@ -0,0 +1,12 @@
+package time // import "github.com/docker/docker/api/types/time"
+
+import (
+	"strconv"
+	"time"
+)
+
+// DurationToSecondsString converts the specified duration to the number of
+// seconds it represents, formatted as a string.
+func DurationToSecondsString(duration time.Duration) string {
+	return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
+}
diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go
new file mode 100644
index 00000000000..2a74b7a5979
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go
@@ -0,0 +1,131 @@
+package time // import "github.com/docker/docker/api/types/time"
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// These are additional predefined layouts for use in Time.Format and Time.Parse
+// with --since and --until parameters for `docker logs` and `docker events`
+const (
+	rFC3339Local     = "2006-01-02T15:04:05"           // RFC3339 with local timezone
+	rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
+	dateWithZone     = "2006-01-02Z07:00"              // RFC3339 with time at 00:00:00
+	dateLocal        = "2006-01-02"                    // RFC3339 with local timezone and time at 00:00:00
+)
+
+// GetTimestamp tries to parse the given string first as a Go duration, then as
+// an RFC3339 time, and finally as a Unix timestamp. If any of these parses
+// succeeds, it returns a Unix timestamp as a string; otherwise it returns the
+// given value back.
+
+// GetTimestamp tries to parse the given string first as a Go duration, then
+// as an RFC3339 time, and finally as a Unix timestamp. If any of these
+// succeed, it returns the result as a Unix timestamp string; otherwise it
+// returns the given value unchanged.
+// In case of duration input, the returned timestamp is computed
+// as the given reference time minus the amount of the duration.
+func GetTimestamp(value string, reference time.Time) (string, error) {
+	if d, err := time.ParseDuration(value); value != "0" && err == nil {
+		return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+	}
+
+	var format string
+	// If the string has a Z, a +, or three dashes, use time.Parse;
+	// otherwise use time.ParseInLocation.
+	parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+	if strings.Contains(value, ".") {
+		if parseInLocation {
+			format = rFC3339NanoLocal
+		} else {
+			format = time.RFC3339Nano
+		}
+	} else if strings.Contains(value, "T") {
+		// We want the number of colons in the T portion of the timestamp.
+		tcolons := strings.Count(value, ":")
+		// If parseInLocation is off and we have a +/- zone offset (not Z), then
+		// there will be an extra colon in the input for the tz offset; subtract
+		// that colon from the tcolons count.
+		if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+			tcolons--
+		}
+		if parseInLocation {
+			switch tcolons {
+			case 0:
+				format = "2006-01-02T15"
+			case 1:
+				format = "2006-01-02T15:04"
+			default:
+				format = rFC3339Local
+			}
+		} else {
+			switch tcolons {
+			case 0:
+				format = "2006-01-02T15Z07:00"
+			case 1:
+				format = "2006-01-02T15:04Z07:00"
+			default:
+				format = time.RFC3339
+			}
+		}
+	} else if parseInLocation {
+		format = dateLocal
+	} else {
+		format = dateWithZone
+	}
+
+	var t time.Time
+	var err error
+
+	if parseInLocation {
+		t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+	} else {
+		t, err = time.Parse(format, value)
+	}
+
+	if err != nil {
+		// If there is a `-` then it's an RFC3339-like timestamp.
+		if strings.Contains(value, "-") {
+			return "", err // was probably an RFC3339-like timestamp but the parser failed with an error
+		}
+		if _, _, err := parseTimestamp(value); err != nil {
+			return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
+		}
+		return value, nil // unix timestamp in and out case (meaning: the value passed on the command line is already in the right format for passing to the server)
+	}
+
+	return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
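Taken together, the two entry points behave like this (a minimal sketch; ParseTimestamps is defined just below, and the dates are arbitrary):

```go
package main

import (
	"fmt"
	"time"

	apitime "github.com/docker/docker/api/types/time"
)

func main() {
	now := time.Date(2023, 4, 1, 12, 0, 0, 0, time.UTC)

	// A duration is resolved relative to the reference time.
	ts, _ := apitime.GetTimestamp("1h", now) // "now minus one hour" as Unix seconds
	fmt.Println(ts)                          // 1680346800

	// An RFC3339 date is returned as "seconds.nanoseconds".
	ts, _ = apitime.GetTimestamp("2023-04-01T00:00:00Z", now)
	fmt.Println(ts) // 1680307200.000000000

	// ParseTimestamps (below) splits that format back apart.
	secs, nanos, _ := apitime.ParseTimestamps(ts, 0)
	fmt.Println(time.Unix(secs, nanos).UTC()) // 2023-04-01 00:00:00 +0000 UTC
}
```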
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
+// the format ("%d.%09d", time.Unix(), int64(time.Nanosecond())).
+// If the incoming nanosecond portion is longer or shorter than 9 digits, it
+// is converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable. For example:
+//
+//	seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
+//	if err == nil {
+//		since = time.Unix(seconds, nanoseconds)
+//	}
+//
+// It returns def (the default seconds) if value == "".
+func ParseTimestamps(value string, def int64) (int64, int64, error) {
+	if value == "" {
+		return def, 0, nil
+	}
+	return parseTimestamp(value)
+}
+
+func parseTimestamp(value string) (int64, int64, error) {
+	sa := strings.SplitN(value, ".", 2)
+	s, err := strconv.ParseInt(sa[0], 10, 64)
+	if err != nil {
+		return s, 0, err
+	}
+	if len(sa) != 2 {
+		return s, 0, nil
+	}
+	n, err := strconv.ParseInt(sa[1], 10, 64)
+	if err != nil {
+		return s, n, err
+	}
+	// Should already be in nanoseconds, but convert n to nanoseconds just in case.
+	n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
+	return s, n, nil
+}
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
new file mode 100644
index 00000000000..e3a159912e2
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/types.go
@@ -0,0 +1,635 @@
+package types // import "github.com/docker/docker/api/types"
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/api/types/network"
+	"github.com/docker/docker/api/types/registry"
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/go-connections/nat"
+)
+
+// RootFS holds the description of an image's root filesystem, including the
+// layer IDs.
+type RootFS struct {
+	Type      string
+	Layers    []string `json:",omitempty"`
+	BaseLayer string   `json:",omitempty"`
+}
+
+// ImageInspect contains response of Engine API:
+// GET "/images/{name:.*}/json"
+type ImageInspect struct {
+	ID              string `json:"Id"`
+	RepoTags        []string
+	RepoDigests     []string
+	Parent          string
+	Comment         string
+	Created         string
+	Container       string
+	ContainerConfig *container.Config
+	DockerVersion   string
+	Author          string
+	Config          *container.Config
+	Architecture    string
+	Variant         string `json:",omitempty"`
+	Os              string
+	OsVersion       string `json:",omitempty"`
+	Size            int64
+	VirtualSize     int64
+	GraphDriver     GraphDriverData
+	RootFS          RootFS
+	Metadata        ImageMetadata
+}
+
+// ImageMetadata contains engine-local data about the image
+type ImageMetadata struct {
+	LastTagTime time.Time `json:",omitempty"`
+}
+
+// Container contains response of Engine API:
+// GET "/containers/json"
+type Container struct {
+	ID         string `json:"Id"`
+	Names      []string
+	Image      string
+	ImageID    string
+	Command    string
+	Created    int64
+	Ports      []Port
+	SizeRw     int64 `json:",omitempty"`
+	SizeRootFs int64 `json:",omitempty"`
+	Labels     map[string]string
+	State      string
+	Status     string
+	HostConfig struct {
+		NetworkMode string `json:",omitempty"`
+	}
+	NetworkSettings *SummaryNetworkSettings
+	Mounts          []MountPoint
+}
+
+// CopyConfig contains request body of Engine API:
+// POST "/containers/"+containerID+"/copy"
+type CopyConfig struct {
+	Resource string
+}
+
+// ContainerPathStat is used to encode the header from
+// GET "/containers/{name:.*}/archive".
+// "Name" is the file or directory name.
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + OSType string + Experimental bool + BuilderVersion BuilderVersion +} + +// ComponentVersion describes the version information for a specific component. +type ComponentVersion struct { + Name string + Version string + Details map[string]string `json:",omitempty"` +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Platform struct{ Name string } `json:",omitempty"` + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility + + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +type Commit struct { + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. +} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + KernelMemoryTCP bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + PidsLimit bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + CgroupVersion string `json:",omitempty"` + NEventsListener int + KernelVersion string + OperatingSystem string + OSVersion string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + GenericResources []swarm.GenericResource + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated + ClusterAdvertise string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // 
running containers are detected
+	LiveRestoreEnabled  bool
+	Isolation           container.Isolation
+	InitBinary          string
+	ContainerdCommit    Commit
+	RuncCommit          Commit
+	InitCommit          Commit
+	SecurityOptions     []string
+	ProductLicense      string               `json:",omitempty"`
+	DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
+	Warnings            []string
+}
+
+// KeyValue holds a key/value pair
+type KeyValue struct {
+	Key, Value string
+}
+
+// NetworkAddressPool is a temporary struct used by the Info struct.
+type NetworkAddressPool struct {
+	Base string
+	Size int
+}
+
+// SecurityOpt contains the name and options of a security option
+type SecurityOpt struct {
+	Name    string
+	Options []KeyValue
+}
+
+// DecodeSecurityOptions decodes a security-options string slice to a
+// type-safe []SecurityOpt.
+func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
+	so := []SecurityOpt{}
+	for _, opt := range opts {
+		// Support output from a < 1.13 docker daemon.
+		if !strings.Contains(opt, "=") {
+			so = append(so, SecurityOpt{Name: opt})
+			continue
+		}
+		secopt := SecurityOpt{}
+		split := strings.Split(opt, ",")
+		for _, s := range split {
+			kv := strings.SplitN(s, "=", 2)
+			if len(kv) != 2 {
+				return nil, fmt.Errorf("invalid security option %q", s)
+			}
+			if kv[0] == "" || kv[1] == "" {
+				return nil, errors.New("invalid empty security option")
+			}
+			if kv[0] == "name" {
+				secopt.Name = kv[1]
+				continue
+			}
+			secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]})
+		}
+		so = append(so, secopt)
+	}
+	return so, nil
+}
+
+// PluginsInfo is a temporary struct holding the names of the plugins
+// registered with the docker daemon. It is used by the Info struct.
+type PluginsInfo struct {
+	// List of Volume plugins registered
+	Volume []string
+	// List of Network plugins registered
+	Network []string
+	// List of Authorization plugins registered
+	Authorization []string
+	// List of Log plugins registered
+	Log []string
+}
+
+// ExecStartCheck is a temporary struct used by execStart.
+// Its config fields are part of ExecConfig in the runconfig package.
+type ExecStartCheck struct {
+	// ExecStart will first check if it's detached
+	Detach bool
+	// Check if there's a tty
+	Tty bool
+}
+
+// HealthcheckResult stores information about a single run of a healthcheck probe
+type HealthcheckResult struct {
+	Start    time.Time // Start is the time this check started
+	End      time.Time // End is the time this check ended
+	ExitCode int       // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
+	Output   string    // Output from last check
+}
+
+// Health states
+const (
+	NoHealthcheck = "none"      // NoHealthcheck indicates there is no healthcheck
+	Starting      = "starting"  // Starting indicates that the container is not yet ready
+	Healthy       = "healthy"   // Healthy indicates that the container is running correctly
+	Unhealthy     = "unhealthy" // Unhealthy indicates that the container has a problem
+)
+
+// Health stores information about the container's healthcheck results
+type Health struct {
+	Status        string               // Status is one of Starting, Healthy or Unhealthy
+	FailingStreak int                  // FailingStreak is the number of consecutive failures
+	Log           []*HealthcheckResult // Log contains the last few results (oldest first)
+}
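DecodeSecurityOptions, shown above, accepts both the bare pre-1.13 format and the newer name=...,key=value format. A short sketch of both (the option strings are illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// Security options as they might appear in Info.SecurityOptions.
	opts := []string{
		"name=seccomp,profile=default",
		"apparmor", // bare pre-1.13 form
	}

	decoded, err := types.DecodeSecurityOptions(opts)
	if err != nil {
		panic(err)
	}
	for _, o := range decoded {
		fmt.Println(o.Name, o.Options) // seccomp [{profile default}] / apparmor []
	}
}
```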
+
+// ContainerState stores a container's running state.
+// It is part of ContainerJSONBase and is returned by the "inspect" command.
+type ContainerState struct {
+	Status     string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead".
+	Running    bool
+	Paused     bool
+	Restarting bool
+	OOMKilled  bool
+	Dead       bool
+	Pid        int
+	ExitCode   int
+	Error      string
+	StartedAt  string
+	FinishedAt string
+	Health     *Health `json:",omitempty"`
+}
+
+// ContainerNode stores information about the node that a container
+// is running on. It's only used by the Docker Swarm standalone API.
+type ContainerNode struct {
+	ID        string
+	IPAddress string `json:"IP"`
+	Addr      string
+	Name      string
+	Cpus      int
+	Memory    int64
+	Labels    map[string]string
+}
+
+// ContainerJSONBase contains response of Engine API:
+// GET "/containers/{name:.*}/json"
+type ContainerJSONBase struct {
+	ID              string `json:"Id"`
+	Created         string
+	Path            string
+	Args            []string
+	State           *ContainerState
+	Image           string
+	ResolvConfPath  string
+	HostnamePath    string
+	HostsPath       string
+	LogPath         string
+	Node            *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API
+	Name            string
+	RestartCount    int
+	Driver          string
+	Platform        string
+	MountLabel      string
+	ProcessLabel    string
+	AppArmorProfile string
+	ExecIDs         []string
+	HostConfig      *container.HostConfig
+	GraphDriver     GraphDriverData
+	SizeRw          *int64 `json:",omitempty"`
+	SizeRootFs      *int64 `json:",omitempty"`
+}
+
+// ContainerJSON extends ContainerJSONBase with the container's mount points,
+// config, and network settings.
+type ContainerJSON struct {
+	*ContainerJSONBase
+	Mounts          []MountPoint
+	Config          *container.Config
+	NetworkSettings *NetworkSettings
+}
+
+// NetworkSettings exposes the network settings in the api
+type NetworkSettings struct {
+	NetworkSettingsBase
+	DefaultNetworkSettings
+	Networks map[string]*network.EndpointSettings
+}
+
+// SummaryNetworkSettings provides a summary of a container's networks
+// in /containers/json
+type SummaryNetworkSettings struct {
+	Networks map[string]*network.EndpointSettings
+}
+
+// NetworkSettingsBase holds basic information about networks
+type NetworkSettingsBase struct {
+	Bridge                 string      // Bridge is the Bridge name the network uses (e.g. `docker0`)
+	SandboxID              string      // SandboxID uniquely represents a container's network stack
+	HairpinMode            bool        // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
+	LinkLocalIPv6Address   string      // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
+	LinkLocalIPv6PrefixLen int         // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
+	Ports                  nat.PortMap // Ports is a collection of PortBinding indexed by Port
+	SandboxKey             string      // SandboxKey identifies the sandbox
+	SecondaryIPAddresses   []network.Address
+	SecondaryIPv6Addresses []network.Address
+}
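These settings surface through container inspection. A minimal sketch (assuming a running daemon; "my-container" is a placeholder name, and ContainerInspect is defined elsewhere in the vendored client package):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	inspected, err := cli.ContainerInspect(context.Background(), "my-container")
	if err != nil {
		panic(err)
	}

	// NetworkSettings.Networks maps network name to per-endpoint settings.
	for name, endpoint := range inspected.NetworkSettings.Networks {
		fmt.Printf("%s: %s\n", name, endpoint.IPAddress)
	}
}
```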
+
+// DefaultNetworkSettings holds network information
+// during the two-release deprecation period.
+// It will be removed in Docker 1.11.
+type DefaultNetworkSettings struct {
+	EndpointID          string // EndpointID uniquely represents a service endpoint in a Sandbox
+	Gateway             string // Gateway holds the gateway address for the network
+	GlobalIPv6Address   string // GlobalIPv6Address holds the network's global IPv6 address
+	GlobalIPv6PrefixLen int    // GlobalIPv6PrefixLen represents the mask length of the network's global IPv6 address
+	IPAddress           string // IPAddress holds the IPv4 address for the network
+	IPPrefixLen         int    // IPPrefixLen represents the mask length of the network's IPv4 address
+	IPv6Gateway         string // IPv6Gateway holds the gateway address specific to IPv6
+	MacAddress          string // MacAddress holds the MAC address for the network
+}
+
+// MountPoint represents a mount point configuration inside the container.
+// This is used for reporting the mountpoints in use by a container.
+type MountPoint struct {
+	Type        mount.Type `json:",omitempty"`
+	Name        string     `json:",omitempty"`
+	Source      string
+	Destination string
+	Driver      string `json:",omitempty"`
+	Mode        string
+	RW          bool
+	Propagation mount.Propagation
+}
+
+// NetworkResource is the body of the "get network" http response message
+type NetworkResource struct {
+	Name       string                      // Name is the requested name of the network
+	ID         string                      `json:"Id"` // ID uniquely identifies a network on a single machine
+	Created    time.Time                   // Created is the time the network was created
+	Scope      string                      // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
+	Driver     string                      // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
+	EnableIPv6 bool                        // EnableIPv6 represents whether to enable IPv6
+	IPAM       network.IPAM                // IPAM is the network's IP Address Management
+	Internal   bool                        // Internal represents whether the network is internal-only
+	Attachable bool                        // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
+	Ingress    bool                        // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
+	ConfigFrom network.ConfigReference     // ConfigFrom specifies the source which will provide the configuration for this network.
+	ConfigOnly bool                        // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
+	Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
+	Options    map[string]string           // Options holds the network-specific options used when creating the network
+	Labels     map[string]string           // Labels holds metadata specific to the network being created
+	Peers      []network.PeerInfo          `json:",omitempty"` // List of peer nodes for an overlay network
+	Services   map[string]network.ServiceInfo `json:",omitempty"`
+}
+
+// EndpointResource contains network resources allocated and used for a container in a network
+type EndpointResource struct {
+	Name        string
+	EndpointID  string
+	MacAddress  string
+	IPv4Address string
+	IPv6Address string
+}
+
+// NetworkCreate is the expected body of the "create network" http request message
+type NetworkCreate struct {
+	// Check for networks with duplicate names.
+	// A network is primarily keyed on a random ID rather than on its name,
+	// and a network name is strictly a user-friendly alias to the network,
+	// so there is no guaranteed way to check for duplicates.
+	// The CheckDuplicate option provides best-effort checking for networks
+	// with the same name, but it is not guaranteed to catch all name collisions.
+	CheckDuplicate bool
+	Driver         string
+	Scope          string
+	EnableIPv6     bool
+	IPAM           *network.IPAM
+	Internal       bool
+	Attachable     bool
+	Ingress        bool
+	ConfigOnly     bool
+	ConfigFrom     *network.ConfigReference
+	Options        map[string]string
+	Labels         map[string]string
+}
+
+// NetworkCreateRequest is the request message sent to the server for the
+// network create call.
+type NetworkCreateRequest struct {
+	NetworkCreate
+	Name string
+}
+
+// NetworkCreateResponse is the response message sent by the server for the
+// network create call.
+type NetworkCreateResponse struct {
+	ID      string `json:"Id"`
+	Warning string
+}
+
+// NetworkConnect represents the data to be used to connect a container to the network
+type NetworkConnect struct {
+	Container      string
+	EndpointConfig *network.EndpointSettings `json:",omitempty"`
+}
+
+// NetworkDisconnect represents the data to be used to disconnect a container from the network
+type NetworkDisconnect struct {
+	Container string
+	Force     bool
+}
+
+// NetworkInspectOptions holds parameters to inspect network
+type NetworkInspectOptions struct {
+	Scope   string
+	Verbose bool
+}
+
+// Checkpoint represents the details of a checkpoint
+type Checkpoint struct {
+	Name string // Name is the name of the checkpoint
+}
+
+// Runtime describes an OCI runtime
+type Runtime struct {
+	Path string   `json:"path"`
+	Args []string `json:"runtimeArgs,omitempty"`
+
+	// This is exposed here only for internal use.
+	// It is not currently supported to specify custom shim configs.
+	Shim *ShimConfig `json:"-"`
+}
+
+// ShimConfig is used by runtime to configure containerd shims
+type ShimConfig struct {
+	Binary string
+	Opts   interface{}
+}
+
+// DiskUsage contains response of Engine API:
+// GET "/system/df"
+type DiskUsage struct {
+	LayersSize  int64
+	Images      []*ImageSummary
+	Containers  []*Container
+	Volumes     []*Volume
+	BuildCache  []*BuildCache
+	BuilderSize int64 // Deprecated
+}
+
+// ContainersPruneReport contains the response for Engine API:
+// POST "/containers/prune"
+type ContainersPruneReport struct {
+	ContainersDeleted []string
+	SpaceReclaimed    uint64
+}
+
+// VolumesPruneReport contains the response for Engine API:
+// POST "/volumes/prune"
+type VolumesPruneReport struct {
+	VolumesDeleted []string
+	SpaceReclaimed uint64
+}
+
+// ImagesPruneReport contains the response for Engine API:
+// POST "/images/prune"
+type ImagesPruneReport struct {
+	ImagesDeleted  []ImageDeleteResponseItem
+	SpaceReclaimed uint64
+}
+
+// BuildCachePruneReport contains the response for Engine API:
+// POST "/build/prune"
+type BuildCachePruneReport struct {
+	CachesDeleted  []string
+	SpaceReclaimed uint64
+}
+
+// NetworksPruneReport contains the response for Engine API:
+// POST "/networks/prune"
+type NetworksPruneReport struct {
+	NetworksDeleted []string
+}
+
+// SecretCreateResponse contains the information returned to a client
+// on the creation of a new secret.
+type SecretCreateResponse struct {
+	// ID is the id of the created secret.
+	ID string
+}
+
+// SecretListOptions holds parameters to list secrets
+type SecretListOptions struct {
+	Filters filters.Args
+}
+
+// ConfigCreateResponse contains the information returned to a client
+// on the creation of a new config.
+type ConfigCreateResponse struct {
+	// ID is the id of the created config.
+ ID string +} + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. +type PushResult struct { + Tag string + Digest string + Size int +} + +// BuildResult contains the image id of a successful build +type BuildResult struct { + ID string +} + +// BuildCache contains information about a build cache record +type BuildCache struct { + ID string + Parent string + Type string + Description string + InUse bool + Shared bool + Size int64 + CreatedAt time.Time + LastUsedAt *time.Time + UsageCount int +} + +// BuildCachePruneOptions hold parameters to prune the build cache +type BuildCachePruneOptions struct { + All bool + KeepStorage int64 + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go new file mode 100644 index 00000000000..c69b08448df --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume.go @@ -0,0 +1,72 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Date/Time the volume was created. + CreatedAt string `json:"CreatedAt,omitempty"` + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, + // or `local` for machine level. + // + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *VolumeUsageData `json:"UsageData,omitempty"` +} + +// VolumeUsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// swagger:model VolumeUsageData +type VolumeUsageData struct { + + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // + // Required: true + RefCount int64 `json:"RefCount"` + + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. 
For volumes created with other volume drivers, this field + // is set to `-1` ("not available") + // + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go new file mode 100644 index 00000000000..8538078dd66 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volume_create.go @@ -0,0 +1,31 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// VolumeCreateBody Volume configuration +// swagger:model VolumeCreateBody +type VolumeCreateBody struct { + + // Name of the volume driver to use. + // Required: true + Driver string `json:"Driver"` + + // A mapping of driver options and values. These options are + // passed directly to the driver and are driver specific. + // + // Required: true + DriverOpts map[string]string `json:"DriverOpts"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // The new volume's name. If not specified, Docker generates a name. + // + // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go new file mode 100644 index 00000000000..be06179bf48 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volume_list.go @@ -0,0 +1,23 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +import "github.com/docker/docker/api/types" + +// VolumeListOKBody Volume list response +// swagger:model VolumeListOKBody +type VolumeListOKBody struct { + + // List of volumes + // Required: true + Volumes []*types.Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes. + // + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md new file mode 100644 index 00000000000..992f18117df --- /dev/null +++ b/vendor/github.com/docker/docker/client/README.md @@ -0,0 +1,35 @@ +# Go client for the Docker Engine API + +The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. 
+ +For example, to list running containers (the equivalent of `docker ps`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +func main() { + cli, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } +} +``` + +[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go new file mode 100644 index 00000000000..3aae43e3d17 --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// BuildCancel requests the daemon to cancel ongoing build request +func (cli *Client) BuildCancel(ctx context.Context, id string) error { + query := url.Values{} + query.Set("id", id) + + serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go new file mode 100644 index 00000000000..397d67cdcf1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -0,0 +1,45 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" +) + +// BuildCachePrune requests the daemon to delete unused cache data +func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { + if err := cli.NewVersionError("1.31", "build prune"); err != nil { + return nil, err + } + + report := types.BuildCachePruneReport{} + + query := url.Values{} + if opts.All { + query.Set("all", "1") + } + query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage)) + filters, err := filters.ToJSON(opts.Filters) + if err != nil { + return nil, errors.Wrap(err, "prune could not marshal filters option") + } + query.Set("filters", filters) + + serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + + if err != nil { + return nil, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return nil, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return &report, nil +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go new file mode 100644 index 00000000000..921024fe4fb --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +// CheckpointCreate creates a checkpoint from the given container with the given name +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) + ensureReaderClosed(resp) + 
return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go new file mode 100644 index 00000000000..54f55fa76e6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// CheckpointDelete deletes the checkpoint with the given name from the given container +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go new file mode 100644 index 00000000000..66d46dd161b --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -0,0 +1,28 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" +) + +// CheckpointList returns the checkpoints of the given container in the docker host +func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + var checkpoints []types.Checkpoint + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return checkpoints, wrapResponseError(err, resp, "container", container) + } + + err = json.NewDecoder(resp.body).Decode(&checkpoints) + return checkpoints, err +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go new file mode 100644 index 00000000000..0d3614d5dbd --- /dev/null +++ b/vendor/github.com/docker/docker/client/client.go @@ -0,0 +1,306 @@ +/* +Package client is a Go client for the Docker Engine API. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/engine/api/ + +# Usage + +You use the library by creating a client object and calling methods on it. The +client can be created either from environment variables with NewClientWithOpts(client.FromEnv), +or configured manually with NewClient(). 
+ +For example, to list running containers (the equivalent of "docker ps"): + + package main + + import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + ) + + func main() { + cli, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } + } +*/ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "fmt" + "net" + "net/http" + "net/url" + "path" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" +) + +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") + +// Client is the API client that performs all operations +// against a docker server. +type Client struct { + // scheme sets the scheme for the client + scheme string + // host holds the server address to connect to + host string + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // client used to send and receive http requests. + client *http.Client + // version of the server to talk to. + version string + // custom http headers configured by users. + customHTTPHeaders map[string]string + // manualOverride is set to true when the version was set by users. + manualOverride bool + + // negotiateVersion indicates if the client should automatically negotiate + // the API version to use when making requests. API version negotiation is + // performed on the first request, after which negotiated is set to "true" + // so that subsequent requests do not re-negotiate. + negotiateVersion bool + + // negotiated indicates that API version negotiation took place + negotiated bool +} + +// CheckRedirect specifies the policy for dealing with redirect responses: +// If the request is non-GET return `ErrRedirect`. Otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . +// The Docker client (and by extension docker API client) can be made to send a request +// like POST /containers//start where what would normally be in the name section of the URL is empty. +// This triggers an HTTP 301 from the daemon. +// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. +// This behavior change manifests in the client in that before the 301 was not followed and +// the client did not generate an error, but now results in a message like Error response from daemon: page not found. +func CheckRedirect(req *http.Request, via []*http.Request) error { + if via[0].Method == http.MethodGet { + return http.ErrUseLastResponse + } + return ErrRedirect +} + +// NewClientWithOpts initializes a new API client with default values. It takes functors +// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))` +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. 
It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. +func NewClientWithOpts(ops ...Opt) (*Client, error) { + client, err := defaultHTTPClient(DefaultDockerHost) + if err != nil { + return nil, err + } + c := &Client{ + host: DefaultDockerHost, + version: api.DefaultVersion, + client: client, + proto: defaultProto, + addr: defaultAddr, + } + + for _, op := range ops { + if err := op(c); err != nil { + return nil, err + } + } + + if c.scheme == "" { + c.scheme = "http" + + tlsConfig := resolveTLSConfig(c.client.Transport) + if tlsConfig != nil { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + c.scheme = "https" + } + } + + return c, nil +} + +func defaultHTTPClient(host string) (*http.Client, error) { + url, err := ParseHostURL(host) + if err != nil { + return nil, err + } + transport := new(http.Transport) + sockets.ConfigureTransport(transport, url.Scheme, url.Host) + return &http.Client{ + Transport: transport, + CheckRedirect: CheckRedirect, + }, nil +} + +// Close the transport used by the client +func (cli *Client) Close() error { + if t, ok := cli.client.Transport.(*http.Transport); ok { + t.CloseIdleConnections() + } + return nil +} + +// getAPIPath returns the versioned request path to call the api. +// It appends the query parameters to the path if they are not empty. +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { + var apiPath string + if cli.negotiateVersion && !cli.negotiated { + cli.NegotiateAPIVersion(ctx) + } + if cli.version != "" { + v := strings.TrimPrefix(cli.version, "v") + apiPath = path.Join(cli.basePath, "/v"+v, p) + } else { + apiPath = path.Join(cli.basePath, p) + } + return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() +} + +// ClientVersion returns the API version used by this client. +func (cli *Client) ClientVersion() string { + return cli.version +} + +// NegotiateAPIVersion queries the API and updates the version to match the +// API version. Any errors are silently ignored. If a manual override is in place, +// either through the `DOCKER_API_VERSION` environment variable, or if the client +// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation +// will be performed. +func (cli *Client) NegotiateAPIVersion(ctx context.Context) { + if !cli.manualOverride { + ping, _ := cli.Ping(ctx) + cli.negotiateAPIVersionPing(ping) + } +} + +// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion +// if the ping version is less than the default version. If a manual override is +// in place, either through the `DOCKER_API_VERSION` environment variable, or if +// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no +// negotiation is performed. +func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { + if !cli.manualOverride { + cli.negotiateAPIVersionPing(p) + } +} + +// negotiateAPIVersionPing queries the API and updates the version to match the +// API version. Any errors are silently ignored. 
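Ahead of the internal helper below, here is how negotiation looks from the caller's side (a sketch; client.WithAPIVersionNegotiation is defined in this package's options file, which is not part of this hunk):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Negotiate the API version on first use instead of pinning one;
	// the client downgrades to the daemon's version when it is older.
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		panic(err)
	}

	cli.NegotiateAPIVersion(context.Background())
	fmt.Println("using API version", cli.ClientVersion())
}
```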
+func (cli *Client) negotiateAPIVersionPing(p types.Ping) { + // try the latest version before versioning headers existed + if p.APIVersion == "" { + p.APIVersion = "1.24" + } + + // if the client is not initialized with a version, start with the latest supported version + if cli.version == "" { + cli.version = api.DefaultVersion + } + + // if server version is lower than the client version, downgrade + if versions.LessThan(p.APIVersion, cli.version) { + cli.version = p.APIVersion + } + + // Store the results, so that automatic API version negotiation (if enabled) + // won't be performed on the next request. + if cli.negotiateVersion { + cli.negotiated = true + } +} + +// DaemonHost returns the host address used by the client +func (cli *Client) DaemonHost() string { + return cli.host +} + +// HTTPClient returns a copy of the HTTP client bound to the server +func (cli *Client) HTTPClient() *http.Client { + c := *cli.client + return &c +} + +// ParseHostURL parses a url string, validates the string is a host url, and +// returns the parsed URL +func ParseHostURL(host string) (*url.URL, error) { + protoAddrParts := strings.SplitN(host, "://", 2) + if len(protoAddrParts) == 1 { + return nil, fmt.Errorf("unable to parse docker host `%s`", host) + } + + var basePath string + proto, addr := protoAddrParts[0], protoAddrParts[1] + if proto == "tcp" { + parsed, err := url.Parse("tcp://" + addr) + if err != nil { + return nil, err + } + addr = parsed.Host + basePath = parsed.Path + } + return &url.URL{ + Scheme: proto, + Host: addr, + Path: basePath, + }, nil +} + +// CustomHTTPHeaders returns the custom http headers stored by the client. +func (cli *Client) CustomHTTPHeaders() map[string]string { + m := make(map[string]string) + for k, v := range cli.customHTTPHeaders { + m[k] = v + } + return m +} + +// SetCustomHTTPHeaders that will be set on every HTTP request made by the client. +// Deprecated: use WithHTTPHeaders when creating the client. +func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { + cli.customHTTPHeaders = headers +} + +// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection. +// Used by `docker dial-stdio` (docker/cli#889). +func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { + return func(ctx context.Context) (net.Conn, error) { + if transport, ok := cli.client.Transport.(*http.Transport); ok { + if transport.DialContext != nil && transport.TLSClientConfig == nil { + return transport.DialContext(ctx, cli.proto, cli.addr) + } + } + return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + } +} diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go new file mode 100644 index 00000000000..54cdfc29a84 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_deprecated.go @@ -0,0 +1,23 @@ +package client + +import "net/http" + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. 
+// Deprecated: use NewClientWithOpts
+func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
+	return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders))
+}
+
+// NewEnvClient initializes a new API client based on environment variables.
+// See FromEnv for a list of supported environment variables.
+//
+// Deprecated: use NewClientWithOpts(FromEnv)
+func NewEnvClient() (*Client, error) {
+	return NewClientWithOpts(FromEnv)
+}
diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go
new file mode 100644
index 00000000000..5846f888fea
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client_unix.go
@@ -0,0 +1,10 @@
+//go:build linux || freebsd || openbsd || netbsd || darwin || solaris || illumos || dragonfly
+// +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly
+
+package client // import "github.com/docker/docker/client"
+
+// DefaultDockerHost defines the OS-specific default used when DOCKER_HOST is unset.
+const DefaultDockerHost = "unix:///var/run/docker.sock"
+
+const defaultProto = "unix"
+const defaultAddr = "/var/run/docker.sock"
diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go
new file mode 100644
index 00000000000..c649e54412c
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/client_windows.go
@@ -0,0 +1,7 @@
+package client // import "github.com/docker/docker/client"
+
+// DefaultDockerHost defines the OS-specific default used when DOCKER_HOST is unset.
+const DefaultDockerHost = "npipe:////./pipe/docker_engine"
+
+const defaultProto = "npipe"
+const defaultAddr = "//./pipe/docker_engine"
diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go
new file mode 100644
index 00000000000..ee7d411df06
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_create.go
@@ -0,0 +1,25 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/swarm"
+)
+
+// ConfigCreate creates a new Config.
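Ahead of the implementation below, a usage sketch (swarm.ConfigSpec comes from the api/types/swarm package; the config name and payload are placeholders, and the daemon must be a swarm manager on at least API 1.30):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	spec := swarm.ConfigSpec{
		Annotations: swarm.Annotations{Name: "app-config"},
		Data:        []byte("key=value\n"),
	}
	resp, err := cli.ConfigCreate(context.Background(), spec)
	if err != nil {
		panic(err)
	}
	fmt.Println("created config", resp.ID)
}
```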
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + var response types.ConfigCreateResponse + if err := cli.NewVersionError("1.30", "config create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go new file mode 100644 index 00000000000..f1b0d7f7536 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigInspectWithRaw returns the config information with raw data +func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if id == "" { + return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} + } + if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + return swarm.Config{}, nil, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return swarm.Config{}, nil, err + } + + var config swarm.Config + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&config) + + return config, body, err +} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go new file mode 100644 index 00000000000..565acc6e273 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigList returns the list of configs. +func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if err := cli.NewVersionError("1.30", "config list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/configs", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var configs []swarm.Config + err = json.NewDecoder(resp.body).Decode(&configs) + return configs, err +} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go new file mode 100644 index 00000000000..a708fcaecfd --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ConfigRemove removes a Config. 
+func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
+	if err := cli.NewVersionError("1.30", "config remove"); err != nil {
+		return err
+	}
+	resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
+	defer ensureReaderClosed(resp)
+	return wrapResponseError(err, resp, "config", id)
+}
diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go
new file mode 100644
index 00000000000..39e59cf8589
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_update.go
@@ -0,0 +1,21 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"net/url"
+	"strconv"
+
+	"github.com/docker/docker/api/types/swarm"
+)
+
+// ConfigUpdate attempts to update a Config
+func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error {
+	if err := cli.NewVersionError("1.30", "config update"); err != nil {
+		return err
+	}
+	query := url.Values{}
+	query.Set("version", strconv.FormatUint(version.Index, 10))
+	resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go
new file mode 100644
index 00000000000..3becefba083
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_attach.go
@@ -0,0 +1,57 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+//
+// The stream format on the response will be in one of two formats:
+//
+// If the container is using a TTY, there is only a single stream (stdout), and
+// data is copied directly from the container output stream, with no extra
+// multiplexing or headers.
+//
+// If the container is *not* using a TTY, streams for stdout and stderr are
+// multiplexed.
+// The format of the multiplexed stream is as follows:
+//
+//	[8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for stdout and 2 for stderr.
+//
+// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
+// This is the size of OUTPUT.
+//
+// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
+// stream.
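A demultiplexing sketch to go with the format description above, placed ahead of the implementation below ("my-container" is a placeholder, and stdcopy is the helper the comment refers to):

```go
package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	// The container must not be using a TTY, otherwise the stream is
	// not multiplexed and StdCopy is unnecessary.
	resp, err := cli.ContainerAttach(context.Background(), "my-container",
		types.ContainerAttachOptions{Stream: true, Stdout: true, Stderr: true})
	if err != nil {
		panic(err)
	}
	defer resp.Close()

	// Demultiplex the 8-byte-header frames onto stdout and stderr.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
		panic(err)
	}
}
```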
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + if options.Logs { + query.Set("logs", "1") + } + + headers := map[string][]string{"Content-Type": {"text/plain"}} + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go new file mode 100644 index 00000000000..2966e88c8ec --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -0,0 +1,55 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "errors" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ContainerCommit applies changes into a container and creates a new tagged image. +func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { + var repository, tag string + if options.Reference != "" { + ref, err := reference.ParseNormalizedNamed(options.Reference) + if err != nil { + return types.IDResponse{}, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + } + ref = reference.TagNameOnly(ref) + + if tagged, ok := ref.(reference.Tagged); ok { + tag = tagged.Tag() + } + repository = reference.FamiliarName(ref) + } + + query := url.Values{} + query.Set("container", container) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if !options.Pause { + query.Set("pause", "0") + } + + var response types.IDResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go new file mode 100644 index 00000000000..bb278bf7f32 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -0,0 +1,103 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" +) + +// ContainerStatPath returns Stat information about a path inside the container filesystem. +func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
+ + urlStr := "/containers/" + containerID + "/archive" + response, err := cli.head(ctx, urlStr, query, nil) + defer ensureReaderClosed(response) + if err != nil { + return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) + } + return getContainerPathStatFromHeader(response.header) +} + +// CopyToContainer copies content into the container filesystem. +// Note that `content` must be a Reader for a TAR archive +func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + if options.CopyUIDGID { + query.Set("copyUIDGID", "true") + } + + apiPath := "/containers/" + containerID + "/archive" + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + defer ensureReaderClosed(response) + if err != nil { + return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) + } + + // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := "/containers/" + containerID + "/archive" + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) + } + + // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior + if response.statusCode != http.StatusOK { + return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. 
+ stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return response.body, stat, err +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go new file mode 100644 index 00000000000..c5079ee539e --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -0,0 +1,74 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "path" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +type configWrapper struct { + *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig +} + +// ContainerCreate creates a new container based in the given configuration. +// It can be associated with a name, but it's not mandatory. +func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.ContainerCreateCreatedBody, error) { + var response container.ContainerCreateCreatedBody + + if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + + // When using API 1.24 and under, the client is responsible for removing the container + if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") { + hostConfig.AutoRemove = false + } + + if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil { + return response, err + } + + query := url.Values{} + if p := formatPlatform(platform); p != "" { + query.Set("platform", p) + } + + if containerName != "" { + query.Set("name", containerName) + } + + body := configWrapper{ + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + } + + serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} + +// formatPlatform returns a formatted string representing platform (e.g. linux/arm/v7). +// +// Similar to containerd's platforms.Format(), but does allow components to be +// omitted (e.g. 
pass "architecture" only, without "os": +// https://github.com/containerd/containerd/blob/v1.5.2/platforms/platforms.go#L243-L263 +func formatPlatform(platform *specs.Platform) string { + if platform == nil { + return "" + } + return path.Join(platform.OS, platform.Architecture, platform.Variant) +} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go new file mode 100644 index 00000000000..29dac8491df --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_diff.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/container" +) + +// ContainerDiff shows differences in a container filesystem since it was started. +func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) { + var changes []container.ContainerChangeResponseItem + + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return changes, err + } + + err = json.NewDecoder(serverResp.body).Decode(&changes) + return changes, err +} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go new file mode 100644 index 00000000000..e3ee755b71d --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -0,0 +1,54 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// ContainerExecCreate creates a new exec configuration to run an exec process. +func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { + var response types.IDResponse + + if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { + return response, err + } + + resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} + +// ContainerExecStart starts an exec process already created in the docker host. +func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { + resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) + ensureReaderClosed(resp) + return err +} + +// ContainerExecAttach attaches a connection to an exec process in the server. +// It returns a types.HijackedConnection with the hijacked connection +// and the a reader to get output. It's up to the called to close +// the hijacked connection by calling types.HijackedResponse.Close. +func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { + headers := map[string][]string{"Content-Type": {"application/json"}} + return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) +} + +// ContainerExecInspect returns information about a specific exec process on the docker host. 
+func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { + var response types.ContainerExecInspect + resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go new file mode 100644 index 00000000000..d0c0a5cbadf --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_export.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" +) + +// ContainerExport retrieves the raw contents of a container +// and returns them as an io.ReadCloser. It's up to the caller +// to close the stream. +func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go new file mode 100644 index 00000000000..43db32bd973 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -0,0 +1,53 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + if containerID == "" { + return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. 
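+//
+// A minimal sketch (assuming cli is a *Client and ctx is a context.Context);
+// the raw bytes are handy when the unparsed daemon response needs to be
+// persisted or diffed alongside the typed struct:
+//
+//	info, raw, err := cli.ContainerInspectWithRaw(ctx, "mycontainer", true)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(info.State.Status)           // typed access, e.g. "running"
+//	os.WriteFile("inspect.json", raw, 0o644) // verbatim JSON, sizes included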
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + if containerID == "" { + return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} + } + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + var response types.ContainerJSON + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go new file mode 100644 index 00000000000..4d6f1d23da9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. +func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + query.Set("signal", signal) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go new file mode 100644 index 00000000000..a973de597fd --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -0,0 +1,57 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ContainerList returns the list of containers in the docker host. 
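+//
+// A minimal sketch (assuming cli is a *Client and ctx is a context.Context):
+//
+//	f := filters.NewArgs(filters.Arg("status", "exited"))
+//	list, err := cli.ContainerList(ctx, types.ContainerListOptions{
+//		All:     true,
+//		Filters: f,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, c := range list {
+//		fmt.Println(c.ID[:12], c.Image, c.Status)
+//	}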
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit != -1 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/containers/json", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var containers []types.Container + err = json.NewDecoder(resp.body).Decode(&containers) + return containers, err +} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go new file mode 100644 index 00000000000..add852a833a --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -0,0 +1,80 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" +) + +// ContainerLogs returns the logs generated by a container in an io.ReadCloser. +// It's up to the caller to close the stream. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. 
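+//
+// A minimal sketch of the non-TTY case (assuming cli is a *Client and ctx is
+// a context.Context):
+//
+//	rc, err := cli.ContainerLogs(ctx, "mycontainer", types.ContainerLogsOptions{
+//		ShowStdout: true,
+//		ShowStderr: true,
+//		Tail:       "100",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, rc) // demultiplex the stream
+//	return err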
+func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "since"`) + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "until"`) + } + query.Set("until", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + if err != nil { + return nil, wrapResponseError(err, resp, "container", container) + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go new file mode 100644 index 00000000000..5e7271a371c --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_pause.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ContainerPause pauses the main process of a given container without terminating it. +func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go new file mode 100644 index 00000000000..04383deaaff --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ContainersPrune requests the daemon to delete unused data +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { + var report types.ContainersPruneReport + + if err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go new file mode 100644 index 00000000000..df81461b889 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -0,0 +1,27 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerRemove kills and removes a container from the docker host. 
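+//
+// A minimal sketch (assuming cli is a *Client and ctx is a context.Context):
+//
+//	err := cli.ContainerRemove(ctx, "mycontainer", types.ContainerRemoveOptions{
+//		Force:         true, // kill a running container rather than failing
+//		RemoveVolumes: true, // also delete its anonymous volumes
+//	})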
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
+	query := url.Values{}
+	if options.RemoveVolumes {
+		query.Set("v", "1")
+	}
+	if options.RemoveLinks {
+		query.Set("link", "1")
+	}
+
+	if options.Force {
+		query.Set("force", "1")
+	}
+
+	resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
+	defer ensureReaderClosed(resp)
+	return wrapResponseError(err, resp, "container", containerID)
+}
diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go
new file mode 100644
index 00000000000..240fdf552b4
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_rename.go
@@ -0,0 +1,15 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"net/url"
+)
+
+// ContainerRename changes the name of a given container.
+func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
+	query := url.Values{}
+	query.Set("name", newContainerName)
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go
new file mode 100644
index 00000000000..a9d4c0c79a0
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_resize.go
@@ -0,0 +1,29 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"net/url"
+	"strconv"
+
+	"github.com/docker/docker/api/types"
+)
+
+// ContainerResize changes the size of the tty for a container.
+func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error {
+	return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
+}
+
+// ContainerExecResize changes the size of the tty for an exec process running inside a container.
+func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error {
+	return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
+}
+
+func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error {
+	query := url.Values{}
+	query.Set("h", strconv.Itoa(int(height)))
+	query.Set("w", strconv.Itoa(int(width)))
+
+	resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go
new file mode 100644
index 00000000000..41e421969f4
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_restart.go
@@ -0,0 +1,22 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"net/url"
+	"time"
+
+	timetypes "github.com/docker/docker/api/types/time"
+)
+
+// ContainerRestart stops and starts a container again.
+// It makes the daemon wait for the container to be up again for
+// a specific amount of time, given the timeout.
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go new file mode 100644 index 00000000000..c2e0b15dca8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerStart sends a request to the docker daemon to start a container. +func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + query := url.Values{} + if len(options.CheckpointID) != 0 { + query.Set("checkpoint", options.CheckpointID) + } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go new file mode 100644 index 00000000000..0a6488dde82 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -0,0 +1,42 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerStats returns near realtime stats for a given container. +// It's up to the caller to close the io.ReadCloser returned. +func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + if stream { + query.Set("stream", "1") + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} + +// ContainerStatsOneShot gets a single stat entry from a container. +// It differs from `ContainerStats` in that the API should not wait to prime the stats +func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + query.Set("one-shot", "1") + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go new file mode 100644 index 00000000000..629d7ab64c8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" +) + +// ContainerStop stops a container. 
In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. +func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go new file mode 100644 index 00000000000..a5b78999bf0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -0,0 +1,28 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" +) + +// ContainerTop shows process information from within a container. +func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { + var response container.ContainerTopOKBody + query := url.Values{} + if len(arguments) > 0 { + query.Set("ps_args", strings.Join(arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go new file mode 100644 index 00000000000..1d8f873169b --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_unpause.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ContainerUnpause resumes the process execution within a container +func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go new file mode 100644 index 00000000000..6917cf9fb36 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/container" +) + +// ContainerUpdate updates resources of a container +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody + serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go new file mode 100644 index 00000000000..6ab8c1da96a 
--- /dev/null +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -0,0 +1,83 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either "not-running" (default), +// "next-exit", or "removed". +// +// If this client's API version is before 1.30, condition is ignored and +// ContainerWait will return immediately with the two channels, as the server +// will wait as if the condition were "not-running". +// +// If this client's API version is at least 1.30, ContainerWait blocks until +// the request has been acknowledged by the server (with a response header), +// then returns two channels on which the caller can wait for the exit status +// of the container or an error if there was a problem either beginning the +// wait request or in getting the response. This allows the caller to +// synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition before issuing a ContainerStart request. +func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + if versions.LessThan(cli.ClientVersion(), "1.30") { + return cli.legacyContainerWait(ctx, containerID) + } + + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error, 1) + + query := url.Values{} + query.Set("condition", string(condition)) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return resultC, errC + } + + go func() { + defer ensureReaderClosed(resp) + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} + +// legacyContainerWait returns immediately and doesn't have an option to wait +// until the container is removed. 
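+//
+// Either code path is consumed the same way; a minimal sketch (assuming cli
+// is a *Client and ctx is a context.Context):
+//
+//	waitC, errC := cli.ContainerWait(ctx, "mycontainer", container.WaitConditionNextExit)
+//	if err := cli.ContainerStart(ctx, "mycontainer", types.ContainerStartOptions{}); err != nil {
+//		return err
+//	}
+//	select {
+//	case res := <-waitC:
+//		fmt.Println("exit status:", res.StatusCode)
+//	case err := <-errC:
+//		return err
+//	}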
+func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error) + + go func() { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + errC <- err + return + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go new file mode 100644 index 00000000000..354cd36939a --- /dev/null +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" +) + +// DiskUsage requests the current data usage from the daemon +func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { + var du types.DiskUsage + + serverResp, err := cli.get(ctx, "/system/df", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return du, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + return du, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return du, nil +} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go new file mode 100644 index 00000000000..f4e3794cb4c --- /dev/null +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + registrytypes "github.com/docker/docker/api/types/registry" +) + +// DistributionInspect returns the image digest with full Manifest +func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { + // Contact the registry to retrieve digest and platform information + var distributionInspect registrytypes.DistributionInspect + if image == "" { + return distributionInspect, objectNotFoundError{object: "distribution", id: image} + } + + if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + return distributionInspect, err + } + var headers map[string][]string + + if encodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {encodedRegistryAuth}, + } + } + + resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + defer ensureReaderClosed(resp) + if err != nil { + return distributionInspect, err + } + + err = json.NewDecoder(resp.body).Decode(&distributionInspect) + return distributionInspect, err +} diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go new file mode 100644 index 00000000000..041bc8d49c4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/errors.go @@ -0,0 +1,138 @@ +package client // import "github.com/docker/docker/client" + +import ( + "fmt" + "net/http" + + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// errConnectionFailed implements an error returned when connection failed. 
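+//
+// Callers are expected to branch on the exported helpers in this file rather
+// than matching on error strings; a minimal sketch (assuming cli is a
+// *client.Client and ctx is a context.Context):
+//
+//	_, err := cli.ContainerInspect(ctx, "no-such-container")
+//	switch {
+//	case client.IsErrNotFound(err):
+//		fmt.Println("container does not exist")
+//	case client.IsErrConnectionFailed(err):
+//		fmt.Println("daemon unreachable")
+//	case err != nil:
+//		fmt.Println("other error:", err)
+//	}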
+type errConnectionFailed struct { + host string +} + +// Error returns a string representation of an errConnectionFailed +func (err errConnectionFailed) Error() string { + if err.host == "" { + return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" + } + return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) +} + +// IsErrConnectionFailed returns true if the error is caused by connection failed. +func IsErrConnectionFailed(err error) bool { + return errors.As(err, &errConnectionFailed{}) +} + +// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. +func ErrorConnectionFailed(host string) error { + return errConnectionFailed{host: host} +} + +// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility +type notFound interface { + error + NotFound() bool +} + +// IsErrNotFound returns true if the error is a NotFound error, which is returned +// by the API when some object is not found. +func IsErrNotFound(err error) bool { + var e notFound + if errors.As(err, &e) { + return true + } + return errdefs.IsNotFound(err) +} + +type objectNotFoundError struct { + object string + id string +} + +func (e objectNotFoundError) NotFound() {} + +func (e objectNotFoundError) Error() string { + return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) +} + +func wrapResponseError(err error, resp serverResponse, object, id string) error { + switch { + case err == nil: + return nil + case resp.statusCode == http.StatusNotFound: + return objectNotFoundError{object: object, id: id} + case resp.statusCode == http.StatusNotImplemented: + return errdefs.NotImplemented(err) + default: + return err + } +} + +// unauthorizedError represents an authorization error in a remote registry. +type unauthorizedError struct { + cause error +} + +// Error returns a string representation of an unauthorizedError +func (u unauthorizedError) Error() string { + return u.cause.Error() +} + +// IsErrUnauthorized returns true if the error is caused +// when a remote registry authentication fails +func IsErrUnauthorized(err error) bool { + if _, ok := err.(unauthorizedError); ok { + return ok + } + return errdefs.IsUnauthorized(err) +} + +type pluginPermissionDenied struct { + name string +} + +func (e pluginPermissionDenied) Error() string { + return "Permission denied while installing plugin " + e.name +} + +// IsErrPluginPermissionDenied returns true if the error is caused +// when a user denies a plugin's permissions +func IsErrPluginPermissionDenied(err error) bool { + _, ok := err.(pluginPermissionDenied) + return ok +} + +type notImplementedError struct { + message string +} + +func (e notImplementedError) Error() string { + return e.message +} + +func (e notImplementedError) NotImplemented() bool { + return true +} + +// IsErrNotImplemented returns true if the error is a NotImplemented error. +// This is returned by the API when a requested feature has not been +// implemented. 
+func IsErrNotImplemented(err error) bool { + if _, ok := err.(notImplementedError); ok { + return ok + } + return errdefs.IsNotImplemented(err) +} + +// NewVersionError returns an error if the APIVersion required +// if less than the current supported version +func (cli *Client) NewVersionError(APIrequired, feature string) error { + if cli.version != "" && versions.LessThan(cli.version, APIrequired) { + return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) + } + return nil +} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go new file mode 100644 index 00000000000..f0dc9d9e12f --- /dev/null +++ b/vendor/github.com/docker/docker/client/events.go @@ -0,0 +1,102 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + timetypes "github.com/docker/docker/api/types/time" +) + +// Events returns a stream of events in the daemon. It's up to the caller to close the stream +// by cancelling the context. Once the stream has been completely read an io.EOF error will +// be sent over the error channel. If an error is sent all processing will be stopped. It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. +func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(cli.version, options) + if err != nil { + close(started) + errs <- err + return + } + + resp, err := cli.get(ctx, "/events", query, nil) + if err != nil { + close(started) + errs <- err + return + } + defer resp.body.Close() + + decoder := json.NewDecoder(resp.body) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder.Decode(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return messages, errs +} + +func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go new file mode 100644 index 00000000000..e1dc49ef0f6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -0,0 +1,145 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bufio" + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/http/httputil" + 
"net/url" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" +) + +// postHijacked sends a POST request and hijacks the connection. +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { + bodyEncoded, err := encodeData(body) + if err != nil { + return types.HijackedResponse{}, err + } + + apiPath := cli.getAPIPath(ctx, path, query) + req, err := http.NewRequest(http.MethodPost, apiPath, bodyEncoded) + if err != nil { + return types.HijackedResponse{}, err + } + req = cli.addHeaders(req, headers) + + conn, err := cli.setupHijackConn(ctx, req, "tcp") + if err != nil { + return types.HijackedResponse{}, err + } + + return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err +} + +// DialHijack returns a hijacked connection with negotiated protocol proto. +func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest(http.MethodPost, url, nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + return cli.setupHijackConn(ctx, req, proto) +} + +// fallbackDial is used when WithDialer() was not called. +// See cli.Dialer(). +func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { + if tlsConfig != nil && proto != "unix" && proto != "npipe" { + return tls.Dial(proto, addr, tlsConfig) + } + if proto == "npipe" { + return sockets.DialPipe(addr, 32*time.Second) + } + return net.Dial(proto, addr) +} + +func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, error) { + req.Host = cli.addr + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", proto) + + dialer := cli.Dialer() + conn, err := dialer(ctx) + if err != nil { + return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + clientconn := httputil.NewClientConn(conn, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + resp, err := clientconn.Do(req) + + //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons + if err != httputil.ErrPersistEOF { + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + resp.Body.Close() + return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) + } + } + + c, br := clientconn.Hijack() + if br.Buffered() > 0 { + // If there is buffered content, wrap the connection. We return an + // object that implements CloseWrite iff the underlying connection + // implements it. 
+		if _, ok := c.(types.CloseWriter); ok {
+			c = &hijackedConnCloseWriter{&hijackedConn{c, br}}
+		} else {
+			c = &hijackedConn{c, br}
+		}
+	} else {
+		br.Reset(nil)
+	}
+
+	return c, nil
+}
+
+// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case
+// that a) there was already buffered data in the http layer when Hijack() was
+// called, and b) the underlying net.Conn does *not* implement CloseWrite().
+// hijackedConn does not implement CloseWrite() either.
+type hijackedConn struct {
+	net.Conn
+	r *bufio.Reader
+}
+
+func (c *hijackedConn) Read(b []byte) (int, error) {
+	return c.r.Read(b)
+}
+
+// hijackedConnCloseWriter is a hijackedConn which additionally implements
+// CloseWrite(). It is returned by setupHijackConn in the case that a) there
+// was already buffered data in the http layer when Hijack() was called, and b)
+// the underlying net.Conn *does* implement CloseWrite().
+type hijackedConnCloseWriter struct {
+	*hijackedConn
+}
+
+var _ types.CloseWriter = &hijackedConnCloseWriter{}
+
+func (c *hijackedConnCloseWriter) CloseWrite() error {
+	conn := c.Conn.(types.CloseWriter)
+	return conn.CloseWrite()
+}
diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go
new file mode 100644
index 00000000000..8fcf995036f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_build.go
@@ -0,0 +1,146 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+)
+
+// ImageBuild sends a request to the daemon to build images.
+// The Body in the response implements an io.ReadCloser and it's up to the caller to
+// close it.
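+//
+// A minimal sketch (assuming cli is a *Client, ctx is a context.Context, and
+// buildCtx is an io.Reader over a tar archive of the build context, e.g. one
+// produced by archive.TarWithOptions from pkg/archive):
+//
+//	resp, err := cli.ImageBuild(ctx, buildCtx, types.ImageBuildOptions{
+//		Tags:       []string{"example/app:latest"},
+//		Dockerfile: "Dockerfile",
+//		Remove:     true,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer resp.Body.Close()
+//	_, err = io.Copy(os.Stdout, resp.Body) // stream the JSON progress messages
+//	return err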
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + query, err := cli.imageBuildOptionsToQuery(options) + if err != nil { + return types.ImageBuildResponse{}, err + } + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(options.AuthConfigs) + if err != nil { + return types.ImageBuildResponse{}, err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + + headers.Set("Content-Type", "application/x-tar") + + serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + if err != nil { + return types.ImageBuildResponse{}, err + } + + osType := getDockerOS(serverResp.header.Get("Server")) + + return types.ImageBuildResponse{ + Body: serverResp.body, + OSType: osType, + }, nil +} + +func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { + query := url.Values{ + "t": options.Tags, + "securityopt": options.SecurityOpt, + "extrahosts": options.ExtraHosts, + } + if options.SuppressOutput { + query.Set("q", "1") + } + if options.RemoteContext != "" { + query.Set("remote", options.RemoteContext) + } + if options.NoCache { + query.Set("nocache", "1") + } + if options.Remove { + query.Set("rm", "1") + } else { + query.Set("rm", "0") + } + + if options.ForceRemove { + query.Set("forcerm", "1") + } + + if options.PullParent { + query.Set("pull", "1") + } + + if options.Squash { + if err := cli.NewVersionError("1.25", "squash"); err != nil { + return query, err + } + query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + query.Set("cpusetcpus", options.CPUSetCPUs) + query.Set("networkmode", options.NetworkMode) + query.Set("cpusetmems", options.CPUSetMems) + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + query.Set("cgroupparent", options.CgroupParent) + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + query.Set("dockerfile", options.Dockerfile) + query.Set("target", options.Target) + + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + if options.SessionID != "" { + query.Set("session", options.SessionID) + } + if options.Platform != "" { + if err := cli.NewVersionError("1.32", "platform"); err != nil { + return query, err + } + query.Set("platform", strings.ToLower(options.Platform)) + } + if options.BuildID != "" { + query.Set("buildid", options.BuildID) + } + query.Set("version", string(options.Version)) + + if options.Outputs != nil { + outputsJSON, err := json.Marshal(options.Outputs) + if err != nil { + return query, err + } + query.Set("outputs", string(outputsJSON)) + } + return query, nil +} diff --git 
a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go new file mode 100644 index 00000000000..239380474e6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -0,0 +1,37 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageCreate creates a new image based in the parent options. +// It returns the JSON content in the response body. +func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(parentReference) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("tag", getAPITagFromNamedRef(ref)) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/create", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go new file mode 100644 index 00000000000..b5bea10d8f6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_history.go @@ -0,0 +1,22 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/image" +) + +// ImageHistory returns the changes in an image in history format. +func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { + var history []image.HistoryResponseItem + serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return history, err + } + + err = json.NewDecoder(serverResp.body).Decode(&history) + return history, err +} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go new file mode 100644 index 00000000000..d3336d4106a --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -0,0 +1,40 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageImport creates a new image based in the source options. +// It returns the JSON content in the response body. 
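+//
+// A minimal sketch (assuming cli is a *Client, ctx is a context.Context, and
+// rootfs.tar is a placeholder filesystem tarball):
+//
+//	f, err := os.Open("rootfs.tar")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	src := types.ImageImportSource{Source: f, SourceName: "-"} // "-" reads from Source
+//	rc, err := cli.ImageImport(ctx, src, "example/imported", types.ImageImportOptions{Tag: "v1"})
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	io.Copy(io.Discard, rc) // drain the JSON progress stream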
+func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + if ref != "" { + // Check if the given image name can be resolved + if _, err := reference.ParseNormalizedNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("fromSrc", source.SourceName) + query.Set("repo", ref) + query.Set("tag", options.Tag) + query.Set("message", options.Message) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go new file mode 100644 index 00000000000..03aa12d8b4c --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types" +) + +// ImageInspectWithRaw returns the image information and its raw representation. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + if imageID == "" { + return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} + } + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go new file mode 100644 index 00000000000..a4d7505094c --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -0,0 +1,46 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" +) + +// ImageList returns a list of images in the docker host. 
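+//
+// A minimal sketch listing dangling images (assuming cli is a *Client and
+// ctx is a context.Context):
+//
+//	f := filters.NewArgs(filters.Arg("dangling", "true"))
+//	images, err := cli.ImageList(ctx, types.ImageListOptions{Filters: f})
+//	if err != nil {
+//		return err
+//	}
+//	for _, img := range images {
+//		fmt.Println(img.ID, img.RepoTags)
+//	}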
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary + query := url.Values{} + + optionFilters := options.Filters + referenceFilters := optionFilters.Get("reference") + if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { + query.Set("filter", referenceFilters[0]) + for _, filterValue := range referenceFilters { + optionFilters.Del("reference", filterValue) + } + } + if optionFilters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.All { + query.Set("all", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + return images, err +} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go new file mode 100644 index 00000000000..91016e493c4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -0,0 +1,29 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go new file mode 100644 index 00000000000..56af6d7f98f --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ImagesPrune requests the daemon to delete unused data +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { + var report types.ImagesPruneReport + + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go new file mode 100644 index 00000000000..a23975591be --- /dev/null +++ 
b/vendor/github.com/docker/docker/client/image_pull.go @@ -0,0 +1,64 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. +// +// FIXME(vdemeester): there is currently used in a few way in docker/docker +// - if not in trusted content, ref is used to pass the whole reference, and tag is empty +// - if in trusted content, ref is used to pass the reference name, and tag for the digest +func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + if !options.All { + query.Set("tag", getAPITagFromNamedRef(ref)) + } + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +// getAPITagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api expects +// digests to be sent as tags and makes a distinction between the name +// and tag/digest part of a reference. +func getAPITagFromNamedRef(ref reference.Named) string { + if digested, ok := ref.(reference.Digested); ok { + return digested.Digest().String() + } + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + return tagged.Tag() + } + return "" +} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go new file mode 100644 index 00000000000..845580d4a4c --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -0,0 +1,54 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "errors" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" +) + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. 
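+//
+// RegistryAuth carries a base64url-encoded JSON types.AuthConfig; a minimal
+// sketch (assuming cli is a *Client, ctx is a context.Context, and the
+// registry name and credentials are placeholders):
+//
+//	buf, _ := json.Marshal(types.AuthConfig{Username: "user", Password: "secret"})
+//	rc, err := cli.ImagePush(ctx, "registry.example.com/app:latest", types.ImagePushOptions{
+//		RegistryAuth: base64.URLEncoding.EncodeToString(buf),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	io.Copy(os.Stdout, rc) // stream the JSON push progress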
+func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return nil, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("cannot push a digest reference") + } + + name := reference.FamiliarName(ref) + query := url.Values{} + if !options.All { + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + } + + resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go new file mode 100644 index 00000000000..84a41af0f2c --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -0,0 +1,31 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ImageRemove removes an image from the docker host. +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + var dels []types.ImageDeleteResponseItem + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return dels, wrapResponseError(err, resp, "image", imageID) + } + + err = json.NewDecoder(resp.body).Decode(&dels) + return dels, err +} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go new file mode 100644 index 00000000000..d1314e4b22f --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_save.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" +) + +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// It's up to the caller to store the images and close the stream. 
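Usage note: the RegistryAuth string that tryImagePush forwards as the X-Registry-Auth header is, per docker convention, a base64url-encoded JSON types.AuthConfig. A sketch assuming an initialized *client.Client; the registry and credentials are placeholders:

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func pushWithAuth(ctx context.Context, cli *client.Client) error {
	buf, err := json.Marshal(types.AuthConfig{
		Username: "someuser", // placeholder credentials
		Password: "somepass",
	})
	if err != nil {
		return err
	}
	rc, err := cli.ImagePush(ctx, "registry.example.com/team/app:latest",
		types.ImagePushOptions{RegistryAuth: base64.URLEncoding.EncodeToString(buf)})
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(io.Discard, rc) // drain the progress stream
	return err
}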
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { + query := url.Values{ + "names": imageIDs, + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go new file mode 100644 index 00000000000..82955a74775 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -0,0 +1,51 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" +) + +// ImageSearch makes the docker host to search by a term in a remote registry. +// The list of results is not sorted in any fashion. +func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + query.Set("limit", fmt.Sprintf("%d", options.Limit)) + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + defer ensureReaderClosed(resp) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go new file mode 100644 index 00000000000..5652bfc252b --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -0,0 +1,37 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/pkg/errors" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, source, target string) error { + if _, err := reference.ParseAnyReference(source); err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) + } + + ref, err := reference.ParseNormalizedNamed(target) + if err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + ref = reference.TagNameOnly(ref) + + query := url.Values{} + query.Set("repo", reference.FamiliarName(ref)) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + + resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git 
a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go new file mode 100644 index 00000000000..c856704e23f --- /dev/null +++ b/vendor/github.com/docker/docker/client/info.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" +) + +// Info returns information about the docker server. +func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return info, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go new file mode 100644 index 00000000000..aabad4a9110 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface.go @@ -0,0 +1,201 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net" + "net/http" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + volumetypes "github.com/docker/docker/api/types/volume" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
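Usage note: a short sketch of reading daemon-wide state from Info; the fields printed are a representative selection, and cli is assumed to be an initialized *client.Client:

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func printDaemonInfo(ctx context.Context, cli *client.Client) error {
	info, err := cli.Info(ctx)
	if err != nil {
		return err
	}
	// types.Info also carries swarm state, storage driver status, resource
	// limits, and more; only a few fields are shown here.
	fmt.Printf("%s (%s): %d containers, %d images\n",
		info.Name, info.OSType, info.Containers, info.Images)
	return nil
}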
+type CommonAPIClient interface { + ConfigAPIClient + ContainerAPIClient + DistributionAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + PluginAPIClient + ServiceAPIClient + SwarmAPIClient + SecretAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + DaemonHost() string + HTTPClient() *http.Client + ServerVersion(ctx context.Context) (types.Version, error) + NegotiateAPIVersion(ctx context.Context) + NegotiateAPIVersionPing(types.Ping) + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) + Dialer() func(context.Context) (net.Conn, error) + Close() error +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, platform *specs.Platform, containerName string) (containertypes.ContainerCreateCreatedBody, error) + ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) + ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, timeout *time.Duration) error + ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx 
context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) +} + +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) + BuildCancel(ctx context.Context, id string) error + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, network, container string, force bool) error + NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, network string) error + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + 
NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error + PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) + DiskUsage(ctx context.Context) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) +} + +// VolumeAPIClient defines 
API client methods for the volumes +type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) + SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error +} + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigRemove(ctx context.Context, id string) error + ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) + ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error +} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go new file mode 100644 index 00000000000..402ffb512cd --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +type apiClientExperimental interface { + CheckpointAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go new file mode 100644 index 00000000000..5502cd74266 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_stable.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + CommonAPIClient + apiClientExperimental +} + +// Ensure that Client always implements APIClient. 
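Usage note: these per-area interfaces exist so callers can depend on only the methods they use. A sketch, with illustrative names, of a function that takes the narrow client.ImageAPIClient, which lets tests substitute a hand-written fake for the concrete *client.Client:

import (
	"context"
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// ensureImage needs only image methods, so any fake implementing
// client.ImageAPIClient can stand in for the real client in tests.
func ensureImage(ctx context.Context, c client.ImageAPIClient, ref string) error {
	rc, err := c.ImagePull(ctx, ref, types.ImagePullOptions{})
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(io.Discard, rc)
	return err
}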
+var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go new file mode 100644 index 00000000000..f0585206382 --- /dev/null +++ b/vendor/github.com/docker/docker/client/login.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns unauthorizedError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + defer ensureReaderClosed(resp) + + if err != nil { + return registry.AuthenticateOKBody{}, err + } + + var response registry.AuthenticateOKBody + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go new file mode 100644 index 00000000000..57189461341 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +// NetworkConnect connects a container to an existent network in the docker host. +func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go new file mode 100644 index 00000000000..278d9383a86 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// NetworkCreate creates a new network in the docker host. +func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + networkCreateRequest := types.NetworkCreateRequest{ + NetworkCreate: options, + Name: name, + } + var response types.NetworkCreateResponse + serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go new file mode 100644 index 00000000000..dd156766566 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -0,0 +1,15 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +// NetworkDisconnect disconnects a container from an existent network in the docker host. 
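Usage note: NetworkCreate and NetworkConnect are typically used together. A sketch assuming an initialized client; the network name and container ID are placeholders:

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func attachToNewNetwork(ctx context.Context, cli *client.Client, containerID string) error {
	created, err := cli.NetworkCreate(ctx, "app-net", types.NetworkCreate{
		Driver:         "bridge",
		CheckDuplicate: true, // fail rather than silently create a duplicate name
	})
	if err != nil {
		return err
	}
	// Passing a nil *network.EndpointSettings accepts the network's defaults.
	return cli.NetworkConnect(ctx, created.ID, containerID, nil)
}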
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go new file mode 100644 index 00000000000..ecf20ceb6e4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -0,0 +1,49 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) + return networkResource, err +} + +// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. +func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { + if networkID == "" { + return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID} + } + var ( + networkResource types.NetworkResource + resp serverResponse + err error + ) + query := url.Values{} + if options.Verbose { + query.Set("verbose", "true") + } + if options.Scope != "" { + query.Set("scope", options.Scope) + } + resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return networkResource, nil, wrapResponseError(err, resp, "network", networkID) + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go new file mode 100644 index 00000000000..ed2acb55711 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// NetworkList returns the list of networks configured in the docker host. 
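Usage note: a sketch of NetworkInspect with the Verbose option, printing the endpoint of each attached container; the function name and printed fields are illustrative:

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func listNetworkEndpoints(ctx context.Context, cli *client.Client, networkID string) error {
	nw, err := cli.NetworkInspect(ctx, networkID, types.NetworkInspectOptions{Verbose: true})
	if err != nil {
		return err
	}
	for id, ep := range nw.Containers { // one EndpointResource per attached container
		fmt.Printf("%s -> %s\n", id, ep.IPv4Address)
	}
	return nil
}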
+func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + return networkResources, err +} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go new file mode 100644 index 00000000000..cebb1882192 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + if err := cli.NewVersionError("1.25", "network prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go new file mode 100644 index 00000000000..e71b16d8692 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// NetworkRemove removes an existent network from the docker host. +func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "network", networkID) +} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go new file mode 100644 index 00000000000..b58db528567 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// NodeInspectWithRaw returns the node information. 
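Usage note: a sketch of NetworksPrune with a label filter, one of the filters the prune endpoint accepts; the label value is a placeholder:

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func pruneLabelledNetworks(ctx context.Context, cli *client.Client) error {
	report, err := cli.NetworksPrune(ctx, filters.NewArgs(filters.Arg("label", "env=dev")))
	if err != nil {
		return err
	}
	fmt.Printf("removed %d networks\n", len(report.NetworksDeleted))
	return nil
}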
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + if nodeID == "" { + return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} + } + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go new file mode 100644 index 00000000000..c212906bc71 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// NodeList returns the list of nodes. +func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + return nodes, err +} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go new file mode 100644 index 00000000000..03ab8780974 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// NodeRemove removes a Node. +func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "node", nodeID) +} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go new file mode 100644 index 00000000000..de32a617fb0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// NodeUpdate updates a Node. 
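Usage note: node updates follow the swarm inspect-modify-update cycle, passing back the version obtained from the inspect so the daemon can detect concurrent writers. A sketch that drains a node, assuming an initialized client:

import (
	"context"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func drainNode(ctx context.Context, cli *client.Client, nodeID string) error {
	node, _, err := cli.NodeInspectWithRaw(ctx, nodeID)
	if err != nil {
		return err
	}
	node.Spec.Availability = swarm.NodeAvailabilityDrain
	// The version from the inspect lets the daemon reject concurrent updates.
	return cli.NodeUpdate(ctx, nodeID, node.Version, node.Spec)
}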
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go new file mode 100644 index 00000000000..6f77f0955f6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/options.go @@ -0,0 +1,172 @@ +package client + +import ( + "context" + "net" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" +) + +// Opt is a configuration option to initialize a client +type Opt func(*Client) error + +// FromEnv configures the client with values from environment variables. +// +// Supported environment variables: +// DOCKER_HOST to set the url to the docker server. +// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. +// DOCKER_CERT_PATH to load the TLS certificates from. +// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +func FromEnv(c *Client) error { + if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return err + } + + c.client = &http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsc}, + CheckRedirect: CheckRedirect, + } + } + + if host := os.Getenv("DOCKER_HOST"); host != "" { + if err := WithHost(host)(c); err != nil { + return err + } + } + + if version := os.Getenv("DOCKER_API_VERSION"); version != "" { + if err := WithVersion(version)(c); err != nil { + return err + } + } + return nil +} + +// WithDialer applies the dialer.DialContext to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. +// Deprecated: use WithDialContext +func WithDialer(dialer *net.Dialer) Opt { + return WithDialContext(dialer.DialContext) +} + +// WithDialContext applies the dialer to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. +func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { + return func(c *Client) error { + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.DialContext = dialContext + return nil + } + return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) + } +} + +// WithHost overrides the client host with the specified one. 
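Usage note: these Opt functions are meant to be composed through NewClientWithOpts (defined in this package's client.go, not in this hunk). A sketch of a common combination; the socket path and timeout are placeholders:

import (
	"time"

	"github.com/docker/docker/client"
)

func newLocalClient() (*client.Client, error) {
	// Options apply in order, so later options can build on earlier ones.
	return client.NewClientWithOpts(
		client.WithHost("unix:///var/run/docker.sock"),
		client.WithTimeout(30*time.Second),
		client.WithAPIVersionNegotiation(),
	)
}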
+func WithHost(host string) Opt { + return func(c *Client) error { + hostURL, err := ParseHostURL(host) + if err != nil { + return err + } + c.host = host + c.proto = hostURL.Scheme + c.addr = hostURL.Host + c.basePath = hostURL.Path + if transport, ok := c.client.Transport.(*http.Transport); ok { + return sockets.ConfigureTransport(transport, c.proto, c.addr) + } + return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) + } +} + +// WithHTTPClient overrides the client http client with the specified one +func WithHTTPClient(client *http.Client) Opt { + return func(c *Client) error { + if client != nil { + c.client = client + } + return nil + } +} + +// WithTimeout configures the time limit for requests made by the HTTP client +func WithTimeout(timeout time.Duration) Opt { + return func(c *Client) error { + c.client.Timeout = timeout + return nil + } +} + +// WithHTTPHeaders overrides the client default http headers +func WithHTTPHeaders(headers map[string]string) Opt { + return func(c *Client) error { + c.customHTTPHeaders = headers + return nil + } +} + +// WithScheme overrides the client scheme with the specified one +func WithScheme(scheme string) Opt { + return func(c *Client) error { + c.scheme = scheme + return nil + } +} + +// WithTLSClientConfig applies a tls config to the client transport. +func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { + return func(c *Client) error { + opts := tlsconfig.Options{ + CAFile: cacertPath, + CertFile: certPath, + KeyFile: keyPath, + ExclusiveRootPools: true, + } + config, err := tlsconfig.Client(opts) + if err != nil { + return errors.Wrap(err, "failed to create tls config") + } + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.TLSClientConfig = config + return nil + } + return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + } +} + +// WithVersion overrides the client version with the specified one. If an empty +// version is specified, the value will be ignored to allow version negotiation. +func WithVersion(version string) Opt { + return func(c *Client) error { + if version != "" { + c.version = version + c.manualOverride = true + } + return nil + } +} + +// WithAPIVersionNegotiation enables automatic API version negotiation for the client. +// With this option enabled, the client automatically negotiates the API version +// to use when making requests. API version negotiation is performed on the first +// request; subsequent requests will not re-negotiate. +func WithAPIVersionNegotiation() Opt { + return func(c *Client) error { + c.negotiateVersion = true + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go new file mode 100644 index 00000000000..a9af001ef46 --- /dev/null +++ b/vendor/github.com/docker/docker/client/ping.go @@ -0,0 +1,66 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/http" + "path" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" +) + +// Ping pings the server and returns the value of the "Docker-Experimental", +// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use +// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported +// by the daemon. 
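Usage note: because Ping targets the unversioned /_ping endpoint, it is a cheap reachability probe that works even before version negotiation has settled. A sketch assuming an initialized client:

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func checkDaemon(ctx context.Context, cli *client.Client) error {
	ping, err := cli.Ping(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("API %s, OS %s, builder %s\n", ping.APIVersion, ping.OSType, ping.BuilderVersion)
	return nil
}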
+func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { + var ping types.Ping + + // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() + // because ping requests are used during API version negotiation, so we want + // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping + req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return ping, err + } + serverResp, err := cli.doRequest(ctx, req) + if err == nil { + defer ensureReaderClosed(serverResp) + switch serverResp.statusCode { + case http.StatusOK, http.StatusInternalServerError: + // Server handled the request, so parse the response + return parsePingResponse(cli, serverResp) + } + } else if IsErrConnectionFailed(err) { + return ping, err + } + + req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return ping, err + } + serverResp, err = cli.doRequest(ctx, req) + defer ensureReaderClosed(serverResp) + if err != nil { + return ping, err + } + return parsePingResponse(cli, serverResp) +} + +func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { + var ping types.Ping + if resp.header == nil { + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) + } + ping.APIVersion = resp.header.Get("API-Version") + ping.OSType = resp.header.Get("OSType") + if resp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + if bv := resp.header.Get("Builder-Version"); bv != "" { + ping.BuilderVersion = types.BuilderVersion(bv) + } + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) +} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go new file mode 100644 index 00000000000..b95dbaf6863 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_create.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/x-tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go new file mode 100644 index 00000000000..01f6574f952 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go new file mode 100644 index 
00000000000..736da48bd10 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go new file mode 100644 index 00000000000..4a90bec51a0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -0,0 +1,31 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types" +) + +// PluginInspectWithRaw inspects an existing plugin +func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + if name == "" { + return nil, nil, objectNotFoundError{object: "plugin", id: name} + } + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, nil, wrapResponseError(err, resp, "plugin", name) + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return nil, nil, err + } + var p types.Plugin + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&p) + return &p, body, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go new file mode 100644 index 00000000000..012afe61cac --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -0,0 +1,113 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return nil, err + } + + name = resp.header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.body) + if err != nil { + pw.CloseWithError(err) + return + } + defer func() { + if err != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + pw.CloseWithError(err) + return + } + } + + if options.Disabled { + pw.Close() + return + } + + enableErr := 
cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(enableErr) + }() + return pr, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/plugins/privileges", query, headers) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, privileges, headers) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + // todo: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.RegistryAuth = newAuthHeader + resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges types.PluginPrivileges + if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, pluginPermissionDenied{options.RemoteRef} + } + } + return privileges, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go new file mode 100644 index 00000000000..cf1935e2f5e --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -0,0 +1,33 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + query := url.Values{} + + if filter.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return plugins, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/plugins", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return plugins, wrapResponseError(err, resp, "plugin", "") + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + return plugins, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go new file mode 100644 index 00000000000..d20bfe84479 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, 
error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go new file mode 100644 index 00000000000..51ca1040d6d --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "plugin", name) +} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go new file mode 100644 index 00000000000..dcf5752ca2b --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -0,0 +1,12 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" +) + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go new file mode 100644 index 00000000000..115cea945ba --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -0,0 +1,39 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + return nil, err + } + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) +} diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go new file mode 100644 index 00000000000..d3d9a3fe64b --- /dev/null +++ b/vendor/github.com/docker/docker/client/request.go @@ -0,0 +1,264 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net" 
+ "net/http" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// serverResponse is a wrapper for http API responses. +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int + reqURL *url.URL +} + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) +} + +// get sends an http request to the docker API using the method GET with a specific Go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) +} + +// post sends an http request to the docker API using the method POST with a specific Go context. +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) +} + +// putRaw sends an http request to the docker API using the method PUT. +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) +} + +type headers map[string][]string + +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil + } + + body, err := encodeData(obj) + if err != nil { + return nil, headers, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} + +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { + expectedPayload := (method == http.MethodPost || method == http.MethodPut) + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + + if cli.proto == "unix" || cli.proto == "npipe" { + // For local communications, it doesn't matter what the host is. We just + // need a valid and meaningful host name. 
(See #189) + req.Host = "docker" + } + + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + resp, err := cli.doRequest(ctx, req) + if err != nil { + return resp, errdefs.FromStatusCode(err, resp.statusCode) + } + err = cli.checkResponseErr(resp) + return resp, errdefs.FromStatusCode(err, resp.statusCode) +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1, reqURL: req.URL} + + req = req.WithContext(ctx) + resp, err := cli.client.Do(req) + if err != nil { + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { + return serverResp, errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings") + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return serverResp, err + } + + if nErr, ok := err.(*url.Error); ok { + if nErr, ok := nErr.Err.(*net.OpError); ok { + if os.IsPermission(nErr.Err) { + return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + } + } + } + + if err, ok := err.(net.Error); ok { + if err.Timeout() { + return serverResp, ErrorConnectionFailed(cli.host) + } + if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, ErrorConnectionFailed(cli.host) + } + } + + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // `//./pipe/docker_engine`, and the client must be running elevated. + // Give users a clue rather than the not-overly useful message + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: + // open //./pipe/docker_engine: The system cannot find the file specified.`.
+ // Note we can't string compare "The system cannot find the file specified" as + // this is localised - for example in French the error would be + // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` + if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { + // Checks if client is running with elevated privileges + if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil { + err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect") + // close the probe handle; it is only opened to test for elevation + f.Close() + } else { + err = errors.Wrap(err, "this error may indicate that the docker daemon is not running") + } + } + + return serverResp, errors.Wrap(err, "error during connect") + } + + if resp != nil { + serverResp.statusCode = resp.StatusCode + serverResp.body = resp.Body + serverResp.header = resp.Header + } + return serverResp, nil +} + +func (cli *Client) checkResponseErr(serverResp serverResponse) error { + if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { + return nil + } + + var body []byte + var err error + if serverResp.body != nil { + bodyMax := 1 * 1024 * 1024 // 1 MiB + bodyR := &io.LimitedReader{ + R: serverResp.body, + N: int64(bodyMax), + } + body, err = io.ReadAll(bodyR) + if err != nil { + return err + } + if bodyR.N == 0 { + return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL) + } + } + if len(body) == 0 { + return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) + } + + var ct string + if serverResp.header != nil { + ct = serverResp.header.Get("Content-Type") + } + + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return errors.Wrap(err, "Error reading JSON") + } + errorMessage = strings.TrimSpace(errorResponse.Message) + } else { + errorMessage = strings.TrimSpace(string(body)) + } + + return errors.Wrap(errors.New(errorMessage), "Error response from daemon") +} + +func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // so the user can't change OUR headers + for k, v := range cli.customHTTPHeaders { + if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { + continue + } + req.Header.Set(k, v) + } + + for k, v := range headers { + req.Header[k] = v + } + return req +} + +func encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err != nil { + return nil, err + } + } + return params, nil +} + +func ensureReaderClosed(response serverResponse) { + if response.body != nil { + // Drain up to 512 bytes and close the body to let the Transport reuse the connection + io.CopyN(io.Discard, response.body, 512) + response.body.Close() + } +} diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go new file mode 100644 index 00000000000..fd5b914136c --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -0,0 +1,25 @@ +package client //
import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// SecretCreate creates a new Secret. +func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + var response types.SecretCreateResponse + if err := cli.NewVersionError("1.25", "secret create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go new file mode 100644 index 00000000000..c07c9550d44 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// SecretInspectWithRaw returns the secret information with raw data +func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { + return swarm.Secret{}, nil, err + } + if id == "" { + return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} + } + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return swarm.Secret{}, nil, err + } + + var secret swarm.Secret + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&secret) + + return secret, body, err +} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go new file mode 100644 index 00000000000..a0289c9f440 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// SecretList returns the list of secrets. +func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if err := cli.NewVersionError("1.25", "secret list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + return secrets, err +} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go new file mode 100644 index 00000000000..c16f5558041 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// SecretRemove removes a Secret. 
+func (cli *Client) SecretRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "secret", id) +} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go new file mode 100644 index 00000000000..164256bbc15 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// SecretUpdate attempts to update a Secret +func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + if err := cli.NewVersionError("1.25", "secret update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go new file mode 100644 index 00000000000..e0428bf98b3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -0,0 +1,178 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// ServiceCreate creates a new Service. 
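Before the implementation, a usage sketch: the QueryRegistry option drives the digest-pinning helpers defined later in this file, and a bare image name is normalized to a :latest tag. Editorial example only; cli, ctx, and the api/types and api/types/swarm imports are assumed, and the service name is invented:

func exampleServiceCreate(ctx context.Context, cli *Client) error {
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "web"},
		TaskTemplate: swarm.TaskSpec{
			// Leaving Runtime empty selects the container runtime; the
			// method fills in an empty ContainerSpec when it is nil.
			ContainerSpec: &swarm.ContainerSpec{Image: "nginx"}, // normalized to nginx:latest
		},
	}
	resp, err := cli.ServiceCreate(ctx, spec, types.ServiceCreateOptions{
		QueryRegistry: true, // failure to pin a digest surfaces as a warning, not an error
	})
	if err != nil {
		return err
	}
	fmt.Println("service:", resp.ID, "warnings:", resp.Warnings)
	return nil
}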
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var response types.ServiceCreateResponse + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container + if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { + service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + + if err := validateServiceSpec(service); err != nil { + return response, err + } + + // ensure that the image is tagged + var resolveWarning string + switch { + case service.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + case service.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + } + + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + if resolveWarning != "" { + response.Warnings = append(response.Warnings, resolveWarning) + } + + return response, err +} + +func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { + var warning string + if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth); err != nil { + warning = digestWarning(taskSpec.ContainerSpec.Image) + } else { + taskSpec.ContainerSpec.Image = img + if len(imgPlatforms) > 0 { + if taskSpec.Placement == nil { + taskSpec.Placement = &swarm.Placement{} + } + taskSpec.Placement.Platforms = imgPlatforms + } + } + return warning +} + +func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { + var warning string + if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth); err != nil { + warning = digestWarning(taskSpec.PluginSpec.Remote) + } else { + taskSpec.PluginSpec.Remote = img + if len(imgPlatforms) > 0 { + if taskSpec.Placement == nil { + taskSpec.Placement = &swarm.Placement{} + } + taskSpec.Placement.Platforms = imgPlatforms + } + } + return warning +} + +func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { + distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) + var platforms []swarm.Platform + if err != nil { + return "", nil, err + } + + imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) + + if len(distributionInspect.Platforms) > 0 { + platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) + for _, p := range 
distributionInspect.Platforms { + // clear architecture field for arm. This is a temporary patch to address + // https://github.com/docker/swarmkit/issues/2294. The issue is that while + // image manifests report "arm" as the architecture, the node reports + // something like "armv7l" (includes the variant), which causes arm images + // to stop working with swarm mode. This patch removes the architecture + // constraint for arm images to ensure tasks get scheduled. + arch := p.Architecture + if strings.ToLower(arch) == "arm" { + arch = "" + } + platforms = append(platforms, swarm.Platform{ + Architecture: arch, + OS: p.OS, + }) + } + } + return imageWithDigest, platforms, err +} + +// imageWithDigestString takes an image string and a digest, and updates +// the image string if it didn't originally contain a digest. It returns +// the image unmodified in other situations. +func imageWithDigestString(image string, dgst digest.Digest) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { + // the reference has no digest yet; pin it to the one we resolved + img, err := reference.WithDigest(namedRef, dgst) + if err == nil { + return reference.FamiliarString(img) + } + } + } + return image +} + +// imageWithTagString takes an image string, and returns a tagged image +// string, adding a 'latest' tag if one was not provided. It returns an +// empty string if the image cannot be parsed as a reference. +func imageWithTagString(image string) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + return reference.FamiliarString(reference.TagNameOnly(namedRef)) + } + return "" +} + +// digestWarning constructs a formatted warning string using the +// image name that could not be pinned by digest. The formatting +// is hardcoded, but could be made smarter in the future. +func digestWarning(image string) string { + return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} + +func validateServiceSpec(s swarm.ServiceSpec) error { + if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil { + return errors.New("must not specify both a container spec and a plugin spec in the task template") + } + if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin { + return errors.New("mismatched runtime with plugin spec") + } + if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) { + return errors.New("mismatched runtime with container spec") + } + return nil +} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go new file mode 100644 index 00000000000..c5368bab1e3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -0,0 +1,37 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceInspectWithRaw returns the service information and the raw data.
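The unexported tagging helpers above are thin wrappers over the reference package, and their behaviour is easiest to see in isolation. A self-contained editorial sketch; the digest value below is a dummy placeholder:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// What imageWithTagString does: normalize, then default the tag to :latest.
	ref, err := reference.ParseNormalizedNamed("nginx")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.FamiliarString(reference.TagNameOnly(ref))) // nginx:latest

	// Canonical (digested) references already pin a version, so
	// TagNameOnly leaves them untouched.
	canonical, err := reference.ParseNormalizedNamed(
		"nginx@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.FamiliarString(reference.TagNameOnly(canonical)))
}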
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + if serviceID == "" { + return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} + } + query := url.Values{} + query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) + serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go new file mode 100644 index 00000000000..f97ec75a5cb --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -0,0 +1,39 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceList returns the list of services. +func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + if options.Status { + query.Set("status", "true") + } + + resp, err := cli.get(ctx, "/services", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + return services, err +} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go new file mode 100644 index 00000000000..906fd4059e6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -0,0 +1,52 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. +// It's up to the caller to close the stream. 
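A consumption sketch for the function below (editorial; cli, ctx, and the io/os imports are assumed, and the service name is invented). For services whose containers have no TTY the returned stream is the daemon's multiplexed stdout/stderr format; io.Copy is used here to keep the sketch dependency-free:

func exampleServiceLogs(ctx context.Context, cli *Client) error {
	rc, err := cli.ServiceLogs(ctx, "web", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Tail:       "50",
	})
	if err != nil {
		return err
	}
	defer rc.Close() // the caller owns the stream, per the doc comment above

	_, err = io.Copy(os.Stdout, rc)
	return err
}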
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "since"`) + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go new file mode 100644 index 00000000000..953a2adf5ae --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ServiceRemove kills and removes a service. +func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "service", serviceID) +} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go new file mode 100644 index 00000000000..c63895f74f2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -0,0 +1,75 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. +// It should be the value as set *before* the update. You can find this value in the Meta field +// of swarm.Service, which can be found using ServiceInspectWithRaw. 
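The version requirement above translates into a read-modify-write pattern: inspect, mutate the returned spec, and send it back with the version you read, so a concurrent writer causes a failed update instead of a silent overwrite. An editorial sketch with cli and ctx assumed in scope:

func exampleForceRedeploy(ctx context.Context, cli *Client, serviceID string) error {
	svc, _, err := cli.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	if err != nil {
		return err
	}
	svc.Spec.TaskTemplate.ForceUpdate++ // bump to force a rolling restart
	resp, err := cli.ServiceUpdate(ctx, serviceID, svc.Version, svc.Spec, types.ServiceUpdateOptions{})
	if err != nil {
		return err
	}
	fmt.Println("update warnings:", resp.Warnings)
	return nil
}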
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + var ( + query = url.Values{} + response = types.ServiceUpdateResponse{} + ) + + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", options.RegistryAuthFrom) + } + + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + + query.Set("version", strconv.FormatUint(version.Index, 10)) + + if err := validateServiceSpec(service); err != nil { + return response, err + } + + // ensure that the image is tagged + var resolveWarning string + switch { + case service.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + case service.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + } + + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + if resolveWarning != "" { + response.Warnings = append(response.Warnings, resolveWarning) + } + + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go new file mode 100644 index 00000000000..19f59dd582a --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// SwarmGetUnlockKey retrieves the swarm's unlock key. +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go new file mode 100644 index 00000000000..da3c1637ef0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmInit initializes the swarm. 
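A usage sketch for the function below (editorial; the addresses are placeholders from the TEST-NET range). SwarmInit returns the new manager's node ID:

func exampleSwarmInit(ctx context.Context, cli *Client) error {
	nodeID, err := cli.SwarmInit(ctx, swarm.InitRequest{
		ListenAddr:    "0.0.0.0:2377",
		AdvertiseAddr: "192.0.2.10:2377", // placeholder; must be reachable by other nodes
	})
	if err != nil {
		return err
	}
	fmt.Println("initialized swarm, manager node:", nodeID)
	return nil
}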
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go new file mode 100644 index 00000000000..b52b67a8849 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmInspect inspects the swarm. +func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go new file mode 100644 index 00000000000..a1cf0455d2b --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_join.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmJoin joins the swarm. +func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go new file mode 100644 index 00000000000..90ca84b363b --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_leave.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// SwarmLeave leaves the swarm. +func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { + query := url.Values{} + if force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go new file mode 100644 index 00000000000..d2412f7d441 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmUnlock unlocks a locked swarm. +func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go new file mode 100644 index 00000000000..56a5bea761e --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -0,0 +1,22 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "fmt" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmUpdate updates the swarm.
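SwarmUpdate follows the same optimistic-locking shape as ServiceUpdate: read the current state, then pass back the version you read. An editorial sketch that rotates the worker join token, with cli and ctx assumed:

func exampleRotateWorkerToken(ctx context.Context, cli *Client) error {
	sw, err := cli.SwarmInspect(ctx)
	if err != nil {
		return err
	}
	return cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{
		RotateWorkerToken: true,
	})
}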
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) + query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go new file mode 100644 index 00000000000..fb0949da5be --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// TaskInspectWithRaw returns the task information and its raw representation. +func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + if taskID == "" { + return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} + } + serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return swarm.Task{}, nil, err + } + + var response swarm.Task + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go new file mode 100644 index 00000000000..4869b44493b --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -0,0 +1,35 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// TaskList returns the list of tasks. +func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/tasks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.body).Decode(&tasks) + return tasks, err +} diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go new file mode 100644 index 00000000000..6222fab577d --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_logs.go @@ -0,0 +1,51 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// TaskLogs returns the logs generated by a task in an io.ReadCloser. +// It's up to the caller to close the stream.
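Tasks are usually located by filtering on their parent service before fetching logs. A combined editorial sketch; the "web" service name is invented, and cli, ctx, and the api/types/filters, io, and os imports are assumed:

func exampleTaskLogs(ctx context.Context, cli *Client) error {
	f := filters.NewArgs()
	f.Add("service", "web")
	tasks, err := cli.TaskList(ctx, types.TaskListOptions{Filters: f})
	if err != nil || len(tasks) == 0 {
		return err
	}
	rc, err := cli.TaskLogs(ctx, tasks[0].ID, types.ContainerLogsOptions{
		ShowStdout: true,
		Tail:       "20",
	})
	if err != nil {
		return err
	}
	defer rc.Close() // the caller owns the stream
	_, err = io.Copy(os.Stdout, rc)
	return err
}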
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go new file mode 100644 index 00000000000..5541344366b --- /dev/null +++ b/vendor/github.com/docker/docker/client/transport.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "crypto/tls" + "net/http" +) + +// resolveTLSConfig attempts to resolve the TLS configuration from the +// RoundTripper. +func resolveTLSConfig(transport http.RoundTripper) *tls.Config { + switch tr := transport.(type) { + case *http.Transport: + return tr.TLSClientConfig + default: + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go new file mode 100644 index 00000000000..7f3ff44eb80 --- /dev/null +++ b/vendor/github.com/docker/docker/client/utils.go @@ -0,0 +1,34 @@ +package client // import "github.com/docker/docker/client" + +import ( + "net/url" + "regexp" + + "github.com/docker/docker/api/types/filters" +) + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. +func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} + +// getFiltersQuery returns a url query with "filters" query term, based on the +// filters provided. +func getFiltersQuery(f filters.Args) (url.Values, error) { + query := url.Values{} + if f.Len() > 0 { + filterJSON, err := filters.ToJSON(f) + if err != nil { + return query, err + } + query.Set("filters", filterJSON) + } + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go new file mode 100644 index 00000000000..8f17ff4e87a --- /dev/null +++ b/vendor/github.com/docker/docker/client/version.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// ServerVersion returns information of the docker client and server host. 
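ServerVersion makes a convenient smoke test for a freshly constructed client. A self-contained editorial sketch using constructors defined elsewhere in this vendored package; FromEnv honours DOCKER_HOST and related variables, and version negotiation avoids "client version is too new" errors against older daemons:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	v, err := cli.ServerVersion(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("daemon %s (API %s, %s/%s)\n", v.Version, v.APIVersion, v.Os, v.Arch)
}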
+func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + return server, err +} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go new file mode 100644 index 00000000000..92761b3c639 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" +) + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { + var volume types.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + defer ensureReaderClosed(resp) + if err != nil { + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + return volume, err +} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go new file mode 100644 index 00000000000..5c5b3f905c5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types" +) + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return volume, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + if volumeID == "" { + return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + } + + var volume types.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return volume, nil, wrapResponseError(err, resp, "volume", volumeID) + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return volume, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&volume) + return volume, body, err +} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go new file mode 100644 index 00000000000..942498dde2c --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -0,0 +1,33 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" +) + +// VolumeList returns the volumes configured in the docker host. 
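A volume lifecycle sketch for the functions below (editorial; cli and ctx are assumed, volumetypes aliases api/types/volume as in the import block above, and the volume name is invented):

func exampleVolumeLifecycle(ctx context.Context, cli *Client) error {
	vol, err := cli.VolumeCreate(ctx, volumetypes.VolumeCreateBody{Name: "scratch"})
	if err != nil {
		return err
	}
	list, err := cli.VolumeList(ctx, filters.NewArgs())
	if err != nil {
		return err
	}
	fmt.Println(len(list.Volumes), "volumes; warnings:", list.Warnings)
	return cli.VolumeRemove(ctx, vol.Name, false) // force=false
}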
+func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { + var volumes volumetypes.VolumeListOKBody + query := url.Values{} + + if filter.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + return volumes, err +} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go new file mode 100644 index 00000000000..6e324708f2b --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// VolumesPrune requests the daemon to delete unused data +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { + var report types.VolumesPruneReport + + if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving volume prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go new file mode 100644 index 00000000000..79decdafab8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/versions" +) + +// VolumeRemove removes a volume from the docker host. +func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + query := url.Values{} + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { + if force { + query.Set("force", "1") + } + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "volume", volumeID) +} diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go new file mode 100644 index 00000000000..61e7456b4eb --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -0,0 +1,69 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +// ErrNotFound signals that the requested object doesn't exist +type ErrNotFound interface { + NotFound() +} + +// ErrInvalidParameter signals that the user input is invalid +type ErrInvalidParameter interface { + InvalidParameter() +} + +// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. +// A change in state should be able to clear this error. 
+type ErrConflict interface { + Conflict() +} + +// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action +type ErrUnauthorized interface { + Unauthorized() +} + +// ErrUnavailable signals that the requested action/subsystem is not available. +type ErrUnavailable interface { + Unavailable() +} + +// ErrForbidden signals that the requested action cannot be performed under any circumstances. +// When an ErrForbidden is returned, the caller should never retry the action. +type ErrForbidden interface { + Forbidden() +} + +// ErrSystem signals that some internal error occurred. +// An example of this would be a failed mount request. +type ErrSystem interface { + System() +} + +// ErrNotModified signals that an action can't be performed because it's already in the desired state +type ErrNotModified interface { + NotModified() +} + +// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. +type ErrNotImplemented interface { + NotImplemented() +} + +// ErrUnknown signals that the kind of error that occurred is not known. +type ErrUnknown interface { + Unknown() +} + +// ErrCancelled signals that the action was cancelled. +type ErrCancelled interface { + Cancelled() +} + +// ErrDeadline signals that the deadline was reached before the action completed. +type ErrDeadline interface { + DeadlineExceeded() +} + +// ErrDataLoss indicates that data was lost or there is data corruption. +type ErrDataLoss interface { + DataLoss() +} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go new file mode 100644 index 00000000000..c211f174fc1 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/doc.go @@ -0,0 +1,8 @@ +// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. +// Errors that cross the package boundary should implement one (and only one) of these interfaces. +// +// Packages should not reference these interfaces directly, only implement them. +// To check if a particular error implements one of these interfaces, there are helper +// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. +// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`).
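In the client code earlier in this diff, sendRequest routes every response through errdefs.FromStatusCode, so callers can branch on error class instead of string matching. A caller-side editorial sketch, with the errdefs import assumed:

func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errdefs.IsNotFound(err): // e.g. HTTP 404 from the daemon
		return "not found"
	case errdefs.IsInvalidParameter(err): // e.g. HTTP 400
		return "bad request"
	default:
		return "unexpected: " + err.Error()
	}
}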
+package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go new file mode 100644 index 00000000000..fe06fb6f703 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -0,0 +1,279 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import "context" + +type errNotFound struct{ error } + +func (errNotFound) NotFound() {} + +func (e errNotFound) Cause() error { + return e.error +} + +func (e errNotFound) Unwrap() error { + return e.error +} + +// NotFound is a helper to create an error of the class with the same name from any error type +func NotFound(err error) error { + if err == nil || IsNotFound(err) { + return err + } + return errNotFound{err} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { + return e.error +} + +func (e errInvalidParameter) Unwrap() error { + return e.error +} + +// InvalidParameter is a helper to create an error of the class with the same name from any error type +func InvalidParameter(err error) error { + if err == nil || IsInvalidParameter(err) { + return err + } + return errInvalidParameter{err} +} + +type errConflict struct{ error } + +func (errConflict) Conflict() {} + +func (e errConflict) Cause() error { + return e.error +} + +func (e errConflict) Unwrap() error { + return e.error +} + +// Conflict is a helper to create an error of the class with the same name from any error type +func Conflict(err error) error { + if err == nil || IsConflict(err) { + return err + } + return errConflict{err} +} + +type errUnauthorized struct{ error } + +func (errUnauthorized) Unauthorized() {} + +func (e errUnauthorized) Cause() error { + return e.error +} + +func (e errUnauthorized) Unwrap() error { + return e.error +} + +// Unauthorized is a helper to create an error of the class with the same name from any error type +func Unauthorized(err error) error { + if err == nil || IsUnauthorized(err) { + return err + } + return errUnauthorized{err} +} + +type errUnavailable struct{ error } + +func (errUnavailable) Unavailable() {} + +func (e errUnavailable) Cause() error { + return e.error +} + +func (e errUnavailable) Unwrap() error { + return e.error +} + +// Unavailable is a helper to create an error of the class with the same name from any error type +func Unavailable(err error) error { + if err == nil || IsUnavailable(err) { + return err + } + return errUnavailable{err} +} + +type errForbidden struct{ error } + +func (errForbidden) Forbidden() {} + +func (e errForbidden) Cause() error { + return e.error +} + +func (e errForbidden) Unwrap() error { + return e.error +} + +// Forbidden is a helper to create an error of the class with the same name from any error type +func Forbidden(err error) error { + if err == nil || IsForbidden(err) { + return err + } + return errForbidden{err} +} + +type errSystem struct{ error } + +func (errSystem) System() {} + +func (e errSystem) Cause() error { + return e.error +} + +func (e errSystem) Unwrap() error { + return e.error +} + +// System is a helper to create an error of the class with the same name from any error type +func System(err error) error { + if err == nil || IsSystem(err) { + return err + } + return errSystem{err} +} + +type errNotModified struct{ error } + +func (errNotModified) NotModified() {} + +func (e errNotModified) Cause() error { + return e.error +} + +func (e errNotModified) 
Unwrap() error { + return e.error +} + +// NotModified is a helper to create an error of the class with the same name from any error type +func NotModified(err error) error { + if err == nil || IsNotModified(err) { + return err + } + return errNotModified{err} +} + +type errNotImplemented struct{ error } + +func (errNotImplemented) NotImplemented() {} + +func (e errNotImplemented) Cause() error { + return e.error +} + +func (e errNotImplemented) Unwrap() error { + return e.error +} + +// NotImplemented is a helper to create an error of the class with the same name from any error type +func NotImplemented(err error) error { + if err == nil || IsNotImplemented(err) { + return err + } + return errNotImplemented{err} +} + +type errUnknown struct{ error } + +func (errUnknown) Unknown() {} + +func (e errUnknown) Cause() error { + return e.error +} + +func (e errUnknown) Unwrap() error { + return e.error +} + +// Unknown is a helper to create an error of the class with the same name from any error type +func Unknown(err error) error { + if err == nil || IsUnknown(err) { + return err + } + return errUnknown{err} +} + +type errCancelled struct{ error } + +func (errCancelled) Cancelled() {} + +func (e errCancelled) Cause() error { + return e.error +} + +func (e errCancelled) Unwrap() error { + return e.error +} + +// Cancelled is a helper to create an error of the class with the same name from any error type +func Cancelled(err error) error { + if err == nil || IsCancelled(err) { + return err + } + return errCancelled{err} +} + +type errDeadline struct{ error } + +func (errDeadline) DeadlineExceeded() {} + +func (e errDeadline) Cause() error { + return e.error +} + +func (e errDeadline) Unwrap() error { + return e.error +} + +// Deadline is a helper to create an error of the class with the same name from any error type +func Deadline(err error) error { + if err == nil || IsDeadline(err) { + return err + } + return errDeadline{err} +} + +type errDataLoss struct{ error } + +func (errDataLoss) DataLoss() {} + +func (e errDataLoss) Cause() error { + return e.error +} + +func (e errDataLoss) Unwrap() error { + return e.error +} + +// DataLoss is a helper to create an error of the class with the same name from any error type +func DataLoss(err error) error { + if err == nil || IsDataLoss(err) { + return err + } + return errDataLoss{err} +} + +// FromContext returns the error class from the passed in context +func FromContext(ctx context.Context) error { + e := ctx.Err() + if e == nil { + return nil + } + + if e == context.Canceled { + return Cancelled(e) + } + if e == context.DeadlineExceeded { + return Deadline(e) + } + return Unknown(e) +} diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go new file mode 100644 index 00000000000..5afe486779d --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -0,0 +1,53 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import ( + "net/http" + + "github.com/sirupsen/logrus" +) + +// FromStatusCode creates an errdef error, based on the provided HTTP status-code +func FromStatusCode(err error, statusCode int) error { + if err == nil { + return err + } + switch statusCode { + case http.StatusNotFound: + err = NotFound(err) + case http.StatusBadRequest: + err = InvalidParameter(err) + case http.StatusConflict: + err = Conflict(err) + case http.StatusUnauthorized: + err = Unauthorized(err) + case http.StatusServiceUnavailable: + err = Unavailable(err) + case 
http.StatusForbidden: + err = Forbidden(err) + case http.StatusNotModified: + err = NotModified(err) + case http.StatusNotImplemented: + err = NotImplemented(err) + case http.StatusInternalServerError: + if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { + err = System(err) + } + default: + logrus.WithError(err).WithFields(logrus.Fields{ + "module": "api", + "status_code": statusCode, + }).Debug("FIXME: Got a status code for which the error does not match any expected type!") + + switch { + case statusCode >= 200 && statusCode < 400: + // a 2xx or 3xx status is not an error class; leave the error as-is + case statusCode >= 400 && statusCode < 500: + err = InvalidParameter(err) + case statusCode >= 500 && statusCode < 600: + err = System(err) + default: + err = Unknown(err) + } + } + return err +} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go new file mode 100644 index 00000000000..3abf07d0c35 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -0,0 +1,107 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +type causer interface { + Cause() error +} + +func getImplementer(err error) error { + switch e := err.(type) { + case + ErrNotFound, + ErrInvalidParameter, + ErrConflict, + ErrUnauthorized, + ErrUnavailable, + ErrForbidden, + ErrSystem, + ErrNotModified, + ErrNotImplemented, + ErrCancelled, + ErrDeadline, + ErrDataLoss, + ErrUnknown: + return err + case causer: + return getImplementer(e.Cause()) + default: + return err + } +} + +// IsNotFound returns if the passed in error is an ErrNotFound +func IsNotFound(err error) bool { + _, ok := getImplementer(err).(ErrNotFound) + return ok +} + +// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter +func IsInvalidParameter(err error) bool { + _, ok := getImplementer(err).(ErrInvalidParameter) + return ok +} + +// IsConflict returns if the passed in error is an ErrConflict +func IsConflict(err error) bool { + _, ok := getImplementer(err).(ErrConflict) + return ok +} + +// IsUnauthorized returns if the passed in error is an ErrUnauthorized +func IsUnauthorized(err error) bool { + _, ok := getImplementer(err).(ErrUnauthorized) + return ok +} + +// IsUnavailable returns if the passed in error is an ErrUnavailable +func IsUnavailable(err error) bool { + _, ok := getImplementer(err).(ErrUnavailable) + return ok +} + +// IsForbidden returns if the passed in error is an ErrForbidden +func IsForbidden(err error) bool { + _, ok := getImplementer(err).(ErrForbidden) + return ok +} + +// IsSystem returns if the passed in error is an ErrSystem +func IsSystem(err error) bool { + _, ok := getImplementer(err).(ErrSystem) + return ok +} + +// IsNotModified returns if the passed in error is a NotModified error +func IsNotModified(err error) bool { + _, ok := getImplementer(err).(ErrNotModified) + return ok +} + +// IsNotImplemented returns if the passed in error is an ErrNotImplemented +func IsNotImplemented(err error) bool { + _, ok := getImplementer(err).(ErrNotImplemented) + return ok +} + +// IsUnknown returns if the passed in error is an ErrUnknown +func IsUnknown(err error) bool { + _, ok := getImplementer(err).(ErrUnknown) + return ok +} + +// IsCancelled returns if the passed in error is an ErrCancelled +func IsCancelled(err error) bool { + _, ok := getImplementer(err).(ErrCancelled) + return ok +} + +// IsDeadline returns if the passed in error is an ErrDeadline +func IsDeadline(err error) bool { + _, ok :=
getImplementer(err).(ErrDeadline) + return ok +} + +// IsDataLoss returns if the passed in error is an ErrDataLoss +func IsDataLoss(err error) bool { + _, ok := getImplementer(err).(ErrDataLoss) + return ok +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go new file mode 100644 index 00000000000..5e6310fdcd6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -0,0 +1,93 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "errors" + "os" + "path/filepath" + "strings" +) + +// GetRuntimeDir returns XDG_RUNTIME_DIR. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// GetRuntimeDir returns a non-nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { + return xdgRuntimeDir, nil + } + return "", errors.New("could not get XDG_RUNTIME_DIR") +} + +// StickRuntimeDirContents sets the sticky bit on files that are under +// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. +// +// StickRuntimeDirContents returns a slice of the files whose sticky bit it set. +// StickRuntimeDirContents returns a nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func StickRuntimeDirContents(files []string) ([]string, error) { + runtimeDir, err := GetRuntimeDir() + if err != nil { + // ignore error if runtimeDir is empty + return nil, nil + } + runtimeDir, err = filepath.Abs(runtimeDir) + if err != nil { + return nil, err + } + var sticked []string + for _, f := range files { + f, err = filepath.Abs(f) + if err != nil { + return sticked, err + } + if strings.HasPrefix(f, runtimeDir+"/") { + if err = stick(f); err != nil { + return sticked, err + } + sticked = append(sticked, f) + } + } + return sticked, nil +} + +func stick(f string) error { + st, err := os.Stat(f) + if err != nil { + return err + } + m := st.Mode() + m |= os.ModeSticky + return os.Chmod(f, m) +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and a nil error if XDG_DATA_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and a nil error if XDG_CONFIG_HOME is not set.
+// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go new file mode 100644 index 00000000000..fc48e674c11 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -0,0 +1,28 @@ +//go:build !linux +// +build !linux + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "errors" +) + +// GetRuntimeDir is unsupported on non-Linux systems. +func GetRuntimeDir() (string, error) { + return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") +} + +// StickRuntimeDirContents is unsupported on non-Linux systems. +func StickRuntimeDirContents(files []string) ([]string, error) { + return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") +} + +// GetDataHome is unsupported on non-Linux systems. +func GetDataHome() (string, error) { + return "", errors.New("homedir.GetDataHome() is not supported on this system") +} + +// GetConfigHome is unsupported on non-Linux systems. +func GetConfigHome() (string, error) { + return "", errors.New("homedir.GetConfigHome() is not supported on this system") +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go new file mode 100644 index 00000000000..d1732dee52f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -0,0 +1,39 @@ +//go:build !windows +// +build !windows + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" + "os/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +func Get() string { + home := os.Getenv(Key()) + if home == "" { + if u, err := user.Current(); err == nil { + return u.HomeDir + } + } + return home +} + +// GetShortcutString returns the string that is the shortcut to the user's home directory +// in the native shell of the platform running on.
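The homedir package vendored here resolves XDG base directories on Linux and returns stub errors everywhere else. A short sketch, assuming it runs on a Linux host with the usual environment variables set:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/homedir"
)

func main() {
	// On Linux this fails unless pam_systemd (or similar) has set XDG_RUNTIME_DIR;
	// on non-Linux builds the stub always returns an error.
	if dir, err := homedir.GetRuntimeDir(); err == nil {
		fmt.Println("runtime dir:", dir)
	}

	dataHome, _ := homedir.GetDataHome()     // $XDG_DATA_HOME or $HOME/.local/share
	configHome, _ := homedir.GetConfigHome() // $XDG_CONFIG_HOME or $HOME/.config
	fmt.Println(dataHome, configHome, homedir.Get())
}
```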
+func GetShortcutString() string { + return "~" +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go new file mode 100644 index 00000000000..2f81813b287 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is the shortcut to the user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful when using this in format functions +} diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go new file mode 100644 index 00000000000..296c96a6334 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -0,0 +1,240 @@ +// Package nat is a convenience package for manipulation of strings describing network ports. +package nat + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on.
+ + portStartInt, portEndInt, err := ParsePortRangeToInt(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRangeToInt parses the port range string and returns start/end ints +func ParsePortRangeToInt(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := ParsePort(portStr) + return port +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRangeToInt(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp", "sctp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these into the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + for _, rawPort := range ports { + portMappings, err := ParsePortSpec(rawPort) + if err != nil { + return nil, nil, err + } + + for _, portMapping := range portMappings { + port := portMapping.Port + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, portMapping.Binding) + } + } + return exposedPorts, bindings, nil +} + +// PortMapping is a data object mapping a Port to a PortBinding +type PortMapping struct { + Port Port + Binding PortBinding +} + +func splitParts(rawport string) (string, string, string) { + parts := strings.Split(rawport, ":") + n := len(parts) + containerPort := parts[n-1] + + switch n { + case 1: + return "", "", containerPort + case 2: + return "", parts[0], containerPort + case 3: + return parts[0], parts[1], containerPort + default: + return strings.Join(parts[:n-2], ":"), parts[n-2], containerPort + } +} + +// ParsePortSpec parses a port specification string into a slice of PortMappings +func
ParsePortSpec(rawPort string) ([]PortMapping, error) { + var proto string + ip, hostPort, containerPort := splitParts(rawPort) + proto, containerPort = SplitProtoPort(containerPort) + + if ip != "" && ip[0] == '[' { + // Strip [] from IPv6 addresses + rawIP, _, err := net.SplitHostPort(ip + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", ip, err) + } + ip = rawIP + } + if ip != "" && net.ParseIP(ip) == nil { + return nil, fmt.Errorf("Invalid ip address: %s", ip) + } + if containerPort == "" { + return nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := ParsePortRange(containerPort) + if err != nil { + return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = ParsePortRange(hostPort) + if err != nil { + return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, fmt.Errorf("Invalid proto: %s", proto) + } + + ports := []PortMapping{} + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. + if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, err + } + + binding := PortBinding{ + HostIP: ip, + HostPort: hostPort, + } + ports = append(ports, PortMapping{Port: port, Binding: binding}) + } + return ports, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go new file mode 100644 index 00000000000..892adf8c667 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -0,0 +1,57 @@ +package nat + +import ( + "fmt" + "strconv" + "strings" +) + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +// DEPRECATED: do not use, this function may be removed in a future version +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse.
%s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +// ParsePortRange parses and validates the specified string as a port-range (8000-9000) +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go new file mode 100644 index 00000000000..b6eed145e1c --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/sort.go @@ -0,0 +1,96 @@ +package nat + +import ( + "sort" + "strings" +) + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +// Sort sorts a list of ports using the provided predicate +// This function should compare `i` and `j`, returning true if `i` is +// considered to be less than `j` +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} + +type portMapEntry struct { + port Port + binding PortBinding +} + +type portMapSorter []portMapEntry + +func (s portMapSorter) Len() int { return len(s) } +func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Less sorts the ports so that the order is: +// 1. port with larger specified bindings +// 2. larger port +// 3. port with tcp protocol +func (s portMapSorter) Less(i, j int) bool { + pi, pj := s[i].port, s[j].port + hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) + return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") +} + +// SortPortMap sorts the list of ports and their respective mappings. Ports +// with an explicit HostPort are placed first.
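ParsePortSpecs and ParsePortRange above are the main entry points for turning docker-style port strings into typed values. A hedged sketch of how a caller might exercise them (the spec string is an invented example, not from this PR):

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// "127.0.0.1:8080:80/tcp" binds host 127.0.0.1:8080 to container port 80.
	exposed, bindings, err := nat.ParsePortSpecs([]string{"127.0.0.1:8080:80/tcp"})
	if err != nil {
		panic(err)
	}
	for port := range exposed {
		fmt.Println(port.Proto(), port.Port()) // tcp 80
	}
	for port, binds := range bindings {
		fmt.Println(port, "->", binds[0].HostIP+":"+binds[0].HostPort)
	}

	// ParsePortRange handles both single ports and "start-end" ranges.
	start, end, _ := nat.ParsePortRange("8000-9000")
	fmt.Println(start, end) // 8000 9000
}
```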
+func SortPortMap(ports []Port, bindings PortMap) { + s := portMapSorter{} + for _, p := range ports { + if binding, ok := bindings[p]; ok && len(binding) > 0 { + for _, b := range binding { + s = append(s, portMapEntry{port: p, binding: b}) + } + bindings[p] = []PortBinding{} + } else { + s = append(s, portMapEntry{port: p}) + } + } + + sort.Sort(s) + var ( + i int + pm = make(map[Port]struct{}) + ) + // reorder ports + for _, entry := range s { + if _, ok := pm[entry.port]; !ok { + ports[i] = entry.port + pm[entry.port] = struct{}{} + i++ + } + // reorder bindings for this port + if _, ok := bindings[entry.port]; ok { + bindings[entry.port] = append(bindings[entry.port], entry.binding) + } + } +} + +func toInt(s string) uint64 { + i, _, err := ParsePortRange(s) + if err != nil { + i = 0 + } + return i +} diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go index 98e9a1dc61b..c897cb02ade 100644 --- a/vendor/github.com/docker/go-connections/sockets/proxy.go +++ b/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -2,11 +2,8 @@ package sockets import ( "net" - "net/url" "os" "strings" - - "golang.org/x/net/proxy" ) // GetProxyEnv allows access to the uppercase and the lowercase forms of @@ -20,32 +17,12 @@ func GetProxyEnv(key string) string { return proxyValue } -// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a -// proxy.Dialer which will route the connections through the proxy using the -// given dialer. -func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { - allProxy := GetProxyEnv("all_proxy") - if len(allProxy) == 0 { - return direct, nil - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return direct, err - } - - proxyFromURL, err := proxy.FromURL(proxyURL, direct) - if err != nil { - return direct, err - } - - noProxy := GetProxyEnv("no_proxy") - if len(noProxy) == 0 { - return proxyFromURL, nil - } - - perHost := proxy.NewPerHost(proxyFromURL, direct) - perHost.AddFromString(noProxy) - - return perHost, nil +// DialerFromEnvironment was previously used to configure a net.Dialer to route +// connections through a SOCKS proxy. +// DEPRECATED: SOCKS proxies are now supported by configuring only +// http.Transport.Proxy, and no longer require changing http.Transport.Dial. +// Therefore, only sockets.ConfigureTransport() needs to be called, and any +// sockets.DialerFromEnvironment() calls can be dropped. +func DialerFromEnvironment(direct *net.Dialer) (*net.Dialer, error) { + return direct, nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go index a1d7beb4d80..2e9e9006f61 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -3,14 +3,9 @@ package sockets import ( "errors" - "net" "net/http" - "time" ) -// Why 32? See https://github.com/docker/docker/pull/8035. -const defaultTimeout = 32 * time.Second - // ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. 
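Per the deprecation note above, callers now only need ConfigureTransport; proxying is left entirely to http.Transport.Proxy. A sketch of wiring an http.Client to a unix socket, where the socket path and example request are assumptions for illustration:

```go
package main

import (
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	tr := &http.Transport{}
	// For "unix" (and "npipe" on Windows) this installs a DialContext bound to
	// the given address; for TCP it now only sets tr.Proxy = http.ProxyFromEnvironment.
	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
		panic(err)
	}
	client := &http.Client{Transport: tr}
	_ = client // e.g. client.Get("http://localhost/_ping") now dials the socket
}
```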
var ErrProtocolNotAvailable = errors.New("protocol not available") @@ -26,13 +21,6 @@ func ConfigureTransport(tr *http.Transport, proto, addr string) error { return configureNpipeTransport(tr, proto, addr) default: tr.Proxy = http.ProxyFromEnvironment - dialer, err := DialerFromEnvironment(&net.Dialer{ - Timeout: defaultTimeout, - }) - if err != nil { - return err - } - tr.Dial = dialer.Dial } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go index 386cf0dbbde..10d76342652 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -3,6 +3,7 @@ package sockets import ( + "context" "fmt" "net" "net/http" @@ -10,7 +11,10 @@ import ( "time" ) -const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) +const ( + defaultTimeout = 10 * time.Second + maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) +) func configureUnixTransport(tr *http.Transport, proto, addr string) error { if len(addr) > maxUnixSocketPathSize { @@ -18,8 +22,11 @@ func configureUnixTransport(tr *http.Transport, proto, addr string) error { } // No need for compression in local communications. tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.DialTimeout(proto, addr, defaultTimeout) + dialer := &net.Dialer{ + Timeout: defaultTimeout, + } + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + return dialer.DialContext(ctx, proto, addr) } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go index 5c21644e1fe..7acafc5a2ad 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -1,6 +1,7 @@ package sockets import ( + "context" "net" "net/http" "time" @@ -15,8 +16,8 @@ func configureUnixTransport(tr *http.Transport, proto, addr string) error { func configureNpipeTransport(tr *http.Transport, proto, addr string) error { // No need for compression in local communications. tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return DialPipe(addr, defaultTimeout) + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + return winio.DialPipeContext(ctx, addr) } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go index a8b5dbb6fdc..e7591e6edbf 100644 --- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -1,5 +1,51 @@ // +build !windows +/* +Package sockets is a simple unix domain socket wrapper. 
+ +Usage + +For example: + + import( + "fmt" + "net" + "github.com/docker/go-connections/sockets" + ) + + func main() { + path := "/path/to/sockets" + l, err := sockets.NewUnixSocketWithOpts(path, + sockets.WithChown(0, 0), sockets.WithChmod(0660)) + if err != nil { + panic(err) + } + echoStr := "hello" + + go func() { + for { + conn, err := l.Accept() + if err != nil { + return + } + conn.Write([]byte(echoStr)) + conn.Close() + } + }() + + conn, err := net.Dial("unix", path) + if err != nil { + panic(err) + } + + buf := make([]byte, 5) + if _, err := conn.Read(buf); err != nil { + panic(err) + } else if string(buf) != echoStr { + panic(fmt.Errorf("message may be lost")) + } + } +*/ package sockets import ( @@ -8,25 +54,73 @@ import ( "syscall" ) -// NewUnixSocket creates a unix socket with the specified path and group. -func NewUnixSocket(path string, gid int) (net.Listener, error) { +// SockOption is an option for modifying the created socket file +type SockOption func(string) error + +// WithChown modifies the socket file's uid and gid +func WithChown(uid, gid int) SockOption { + return func(path string) error { + if err := os.Chown(path, uid, gid); err != nil { + return err + } + return nil + } +} + +// WithChmod modifies the socket file's access mode. +func WithChmod(mask os.FileMode) SockOption { + return func(path string) error { + if err := os.Chmod(path, mask); err != nil { + return err + } + return nil + } +} + +// NewUnixSocketWithOpts creates a unix socket with the specified options. +// By default, socket permissions are 0000 (i.e.: no access for anyone); pass +// WithChmod() and WithChown() to set the desired ownership and permissions. +// +// This function temporarily changes the system's "umask" to 0777 to work around +// a race condition between creating the socket and setting its permissions. While +// this should only be for a short duration, it may affect other processes that +// create files/directories during that period. +func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error) { + if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { + return nil, err + } - mask := syscall.Umask(0777) - defer syscall.Umask(mask) + // net.Listen does not allow for permissions to be set. As a result, when + // specifying custom permissions ("WithChmod()"), there is a short time + // between creating the socket and applying the permissions, during which + // the socket permissions are less restrictive than desired. + // + // To work around this limitation of net.Listen(), we temporarily set the + // umask to 0777, which forces the socket to be created with 000 permissions + // (i.e.: no access for anyone). After that, WithChmod() must be used to set + // the desired permissions. + // + // We don't use "defer" here, so that the umask is reset to its original value + // as soon as possible. Ideally we'd be able to detect if WithChmod() was passed as + // an option, and skip changing umask if default permissions are used. + origUmask := syscall.Umask(0777) l, err := net.Listen("unix", path) + syscall.Umask(origUmask) if err != nil { return nil, err } - if err := os.Chown(path, 0, gid); err != nil { - l.Close() - return nil, err - } - if err := os.Chmod(path, 0660); err != nil { - l.Close() - return nil, err + + for _, op := range opts { + if err := op(path); err != nil { + _ = l.Close() + return nil, err + } } + return l, nil +} + +// NewUnixSocket creates a unix socket with the specified path and group.
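A brief sketch of the options API documented above, alongside the legacy wrapper that follows it; the socket path is illustrative:

```go
package main

import "github.com/docker/go-connections/sockets"

func main() {
	// Explicit ownership and permissions via options...
	l, err := sockets.NewUnixSocketWithOpts("/tmp/example.sock",
		sockets.WithChown(0, 0),  // root:root
		sockets.WithChmod(0660),  // read-write for owner and group only
	)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// ...is equivalent to the legacy call below:
	// l, err := sockets.NewUnixSocket("/tmp/example.sock", 0)
}
```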
+func NewUnixSocket(path string, gid int) (net.Listener, error) { + return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0660)) +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 0ef3fdcb469..992968373eb 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -53,18 +53,9 @@ var acceptedCBCCiphers = []uint16{ // known weak algorithms removed. var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionSSL30: {}, - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, -} - // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. func ServerDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ + tlsConfig := &tls.Config{ // Avoid fallback by default to SSL protocols < TLS1.2 MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, @@ -72,25 +63,25 @@ func ServerDefault(ops ...func(*tls.Config)) *tls.Config { } for _, op := range ops { - op(tlsconfig) + op(tlsConfig) } - return tlsconfig + return tlsConfig } // ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. func ClientDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ + tlsConfig := &tls.Config{ // Prefer TLS1.2 as the client minimum MinVersion: tls.VersionTLS12, CipherSuites: clientCipherSuites, } for _, op := range ops { - op(tlsconfig) + op(tlsConfig) } - return tlsconfig + return tlsConfig } // certPool returns an X.509 certificate pool from `caFile`, the certificate file. 
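ClientDefault and ServerDefault accept functional options over the returned *tls.Config. A small sketch of overriding one field on the client defaults (the server name is a hypothetical value):

```go
package main

import (
	"crypto/tls"
	"net/http"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// TLS 1.2 floor plus the curated client cipher suites, with one override.
	cfg := tlsconfig.ClientDefault(func(c *tls.Config) {
		c.ServerName = "registry.example.com"
	})
	client := &http.Client{Transport: &http.Transport{TLSClientConfig: cfg}}
	_ = client
}
```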
@@ -108,11 +99,11 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { return nil, fmt.Errorf("failed to read system certificates: %v", err) } } - pem, err := ioutil.ReadFile(caFile) + pemData, err := ioutil.ReadFile(caFile) if err != nil { return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } - if !certPool.AppendCertsFromPEM(pem) { + if !certPool.AppendCertsFromPEM(pemData) { return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) } return certPool, nil @@ -141,7 +132,7 @@ func adjustMinVersion(options Options, config *tls.Config) error { } // IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when tryin to decrypt a TLS private key +// password when trying to decrypt a TLS private key func IsErrEncryptedKey(err error) bool { return errors.Cause(err) == x509.IncorrectPasswordError } @@ -157,8 +148,8 @@ func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { } var err error - if x509.IsEncryptedPEMBlock(pemBlock) { - keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) + if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated) + keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated) if err != nil { return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") } diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go new file mode 100644 index 00000000000..d8215f8e78a --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go @@ -0,0 +1,16 @@ +// +build go1.13 + +package tlsconfig + +import ( + "crypto/tls" +) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, + tls.VersionTLS13: {}, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go new file mode 100644 index 00000000000..a5ba7f4a388 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go @@ -0,0 +1,15 @@ +// +build !go1.13 + +package tlsconfig + +import ( + "crypto/tls" +) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, +} diff --git a/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md new file mode 100644 index 00000000000..c88f9b2bdd0 --- /dev/null +++ b/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# How to Contribute + +## Getting Started + +- Fork the repository on GitHub +- Read the [README](README.markdown) for build and test instructions +- Play with the project, submit bugs, submit patches! + +## Contribution Flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work (usually master). +- Make commits of logical units. +- Make sure your commit messages are in the proper format (see below). +- Push your changes to a topic branch in your fork of the repository. 
+- Make sure the tests pass, and add any new tests as appropriate. +- Submit a pull request to the original repository. + +Thanks for your contributions! + +### Format of the Commit Message + +We follow a rough convention for commit messages that is designed to answer two +questions: what changed and why. The subject line should feature the what and +the body of the commit should describe the why. + +``` +scripts: add the test-cluster command + +this uses tmux to set up a test cluster that you can easily kill and +start for debugging. + +Fixes #38 +``` + +The format can be described more formally as follows: + +``` +: + + + +