From 2cd577bed37acf60d417ed21eb6c73c6a1ea817d Mon Sep 17 00:00:00 2001 From: Thomas Guettler Date: Fri, 22 Dec 2023 10:40:11 +0100 Subject: [PATCH] :sparkles: Add clusteraddon integration tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adding integration tests for clusteraddon controller that use the separate kind cluster. Signed-off-by: Thomas Güttler --- Makefile | 4 +- go.mod | 8 + go.sum | 27 + internal/test/helpers/envtest.go | 86 +- vendor/github.com/BurntSushi/toml/.gitignore | 2 + vendor/github.com/BurntSushi/toml/COMPATIBLE | 1 + vendor/github.com/BurntSushi/toml/COPYING | 21 + vendor/github.com/BurntSushi/toml/README.md | 211 ++ vendor/github.com/BurntSushi/toml/decode.go | 560 +++++ .../BurntSushi/toml/decode_go116.go | 19 + .../github.com/BurntSushi/toml/deprecated.go | 21 + vendor/github.com/BurntSushi/toml/doc.go | 13 + vendor/github.com/BurntSushi/toml/encode.go | 694 +++++++ vendor/github.com/BurntSushi/toml/error.go | 229 ++ .../github.com/BurntSushi/toml/internal/tz.go | 36 + vendor/github.com/BurntSushi/toml/lex.go | 1219 +++++++++++ vendor/github.com/BurntSushi/toml/meta.go | 120 ++ vendor/github.com/BurntSushi/toml/parse.go | 763 +++++++ .../github.com/BurntSushi/toml/type_fields.go | 242 +++ .../github.com/BurntSushi/toml/type_toml.go | 70 + .../github.com/alessio/shellescape/.gitignore | 28 + .../alessio/shellescape/.golangci.yml | 64 + .../alessio/shellescape/.goreleaser.yml | 33 + vendor/github.com/alessio/shellescape/AUTHORS | 1 + .../alessio/shellescape/CODE_OF_CONDUCT.md | 76 + vendor/github.com/alessio/shellescape/LICENSE | 21 + .../github.com/alessio/shellescape/README.md | 61 + .../alessio/shellescape/shellescape.go | 66 + vendor/github.com/google/safetext/LICENSE | 202 ++ .../google/safetext/common/common.go | 260 +++ .../safetext/yamltemplate/yamltemplate.go | 657 ++++++ .../inconshreveable/mousetrap/LICENSE | 201 ++ .../inconshreveable/mousetrap/README.md | 23 + .../inconshreveable/mousetrap/trap_others.go | 16 + .../inconshreveable/mousetrap/trap_windows.go | 42 + vendor/github.com/mattn/go-isatty/LICENSE | 9 + vendor/github.com/mattn/go-isatty/README.md | 50 + vendor/github.com/mattn/go-isatty/doc.go | 2 + vendor/github.com/mattn/go-isatty/go.test.sh | 12 + .../github.com/mattn/go-isatty/isatty_bsd.go | 19 + .../mattn/go-isatty/isatty_others.go | 16 + .../mattn/go-isatty/isatty_plan9.go | 23 + .../mattn/go-isatty/isatty_solaris.go | 21 + .../mattn/go-isatty/isatty_tcgets.go | 19 + .../mattn/go-isatty/isatty_windows.go | 125 ++ .../pelletier/go-toml/.dockerignore | 2 + .../github.com/pelletier/go-toml/.gitignore | 5 + .../pelletier/go-toml/CONTRIBUTING.md | 132 ++ .../github.com/pelletier/go-toml/Dockerfile | 11 + vendor/github.com/pelletier/go-toml/LICENSE | 247 +++ vendor/github.com/pelletier/go-toml/Makefile | 29 + .../go-toml/PULL_REQUEST_TEMPLATE.md | 5 + vendor/github.com/pelletier/go-toml/README.md | 176 ++ .../github.com/pelletier/go-toml/SECURITY.md | 19 + .../pelletier/go-toml/azure-pipelines.yml | 188 ++ .../github.com/pelletier/go-toml/benchmark.sh | 35 + vendor/github.com/pelletier/go-toml/doc.go | 23 + .../pelletier/go-toml/example-crlf.toml | 30 + .../github.com/pelletier/go-toml/example.toml | 30 + vendor/github.com/pelletier/go-toml/fuzz.go | 31 + vendor/github.com/pelletier/go-toml/fuzz.sh | 15 + .../pelletier/go-toml/keysparsing.go | 112 + vendor/github.com/pelletier/go-toml/lexer.go | 1031 +++++++++ .../github.com/pelletier/go-toml/localtime.go | 287 +++ 
.../github.com/pelletier/go-toml/marshal.go | 1308 ++++++++++++ .../go-toml/marshal_OrderPreserve_test.toml | 39 + .../pelletier/go-toml/marshal_test.toml | 39 + vendor/github.com/pelletier/go-toml/parser.go | 507 +++++ .../github.com/pelletier/go-toml/position.go | 29 + vendor/github.com/pelletier/go-toml/token.go | 136 ++ vendor/github.com/pelletier/go-toml/toml.go | 533 +++++ .../github.com/pelletier/go-toml/tomlpub.go | 71 + .../pelletier/go-toml/tomltree_create.go | 155 ++ .../pelletier/go-toml/tomltree_write.go | 552 +++++ .../pelletier/go-toml/tomltree_writepub.go | 6 + vendor/github.com/spf13/cobra/.gitignore | 39 + vendor/github.com/spf13/cobra/.golangci.yml | 62 + vendor/github.com/spf13/cobra/.mailmap | 3 + vendor/github.com/spf13/cobra/CONDUCT.md | 37 + vendor/github.com/spf13/cobra/CONTRIBUTING.md | 50 + vendor/github.com/spf13/cobra/LICENSE.txt | 174 ++ vendor/github.com/spf13/cobra/MAINTAINERS | 13 + vendor/github.com/spf13/cobra/Makefile | 35 + vendor/github.com/spf13/cobra/README.md | 112 + vendor/github.com/spf13/cobra/active_help.go | 63 + vendor/github.com/spf13/cobra/active_help.md | 157 ++ vendor/github.com/spf13/cobra/args.go | 131 ++ .../spf13/cobra/bash_completions.go | 712 +++++++ .../spf13/cobra/bash_completions.md | 93 + .../spf13/cobra/bash_completionsV2.go | 396 ++++ vendor/github.com/spf13/cobra/cobra.go | 239 +++ vendor/github.com/spf13/cobra/command.go | 1834 +++++++++++++++++ .../github.com/spf13/cobra/command_notwin.go | 20 + vendor/github.com/spf13/cobra/command_win.go | 41 + vendor/github.com/spf13/cobra/completions.go | 878 ++++++++ .../spf13/cobra/fish_completions.go | 292 +++ .../spf13/cobra/fish_completions.md | 4 + vendor/github.com/spf13/cobra/flag_groups.go | 224 ++ .../spf13/cobra/powershell_completions.go | 325 +++ .../spf13/cobra/powershell_completions.md | 3 + .../spf13/cobra/projects_using_cobra.md | 64 + .../spf13/cobra/shell_completions.go | 98 + .../spf13/cobra/shell_completions.md | 576 ++++++ vendor/github.com/spf13/cobra/user_guide.md | 726 +++++++ .../github.com/spf13/cobra/zsh_completions.go | 308 +++ .../github.com/spf13/cobra/zsh_completions.md | 48 + vendor/modules.txt | 63 + vendor/sigs.k8s.io/kind/LICENSE | 201 ++ .../kind/pkg/apis/config/defaults/image.go | 21 + .../kind/pkg/apis/config/v1alpha4/default.go | 90 + .../kind/pkg/apis/config/v1alpha4/doc.go | 21 + .../kind/pkg/apis/config/v1alpha4/types.go | 317 +++ .../kind/pkg/apis/config/v1alpha4/yaml.go | 74 + .../config/v1alpha4/zz_generated.deepcopy.go | 213 ++ .../kind/pkg/cluster/constants/constants.go | 49 + .../kind/pkg/cluster/createoption.go | 126 ++ vendor/sigs.k8s.io/kind/pkg/cluster/doc.go | 18 + .../cluster/internal/create/actions/action.go | 90 + .../internal/create/actions/config/config.go | 279 +++ .../internal/create/actions/installcni/cni.go | 134 ++ .../create/actions/installstorage/storage.go | 99 + .../create/actions/kubeadminit/init.go | 148 ++ .../create/actions/kubeadmjoin/join.go | 139 ++ .../actions/loadbalancer/loadbalancer.go | 100 + .../actions/waitforready/waitforready.go | 147 ++ .../pkg/cluster/internal/create/create.go | 257 +++ .../pkg/cluster/internal/delete/delete.go | 54 + .../pkg/cluster/internal/kubeadm/config.go | 522 +++++ .../pkg/cluster/internal/kubeadm/const.go | 24 + .../kind/pkg/cluster/internal/kubeadm/doc.go | 18 + .../kubeconfig/internal/kubeconfig/encode.go | 64 + .../kubeconfig/internal/kubeconfig/helpers.go | 41 + .../kubeconfig/internal/kubeconfig/lock.go | 49 + .../kubeconfig/internal/kubeconfig/merge.go | 111 + 
.../kubeconfig/internal/kubeconfig/paths.go | 167 ++ .../kubeconfig/internal/kubeconfig/read.go | 82 + .../kubeconfig/internal/kubeconfig/remove.go | 111 + .../kubeconfig/internal/kubeconfig/types.go | 89 + .../kubeconfig/internal/kubeconfig/write.go | 44 + .../cluster/internal/kubeconfig/kubeconfig.go | 105 + .../cluster/internal/loadbalancer/config.go | 85 + .../cluster/internal/loadbalancer/const.go | 23 + .../pkg/cluster/internal/loadbalancer/doc.go | 18 + .../kind/pkg/cluster/internal/logs/doc.go | 18 + .../kind/pkg/cluster/internal/logs/logs.go | 105 + .../internal/providers/common/cgroups.go | 85 + .../internal/providers/common/constants.go | 21 + .../cluster/internal/providers/common/doc.go | 18 + .../internal/providers/common/getport.go | 47 + .../internal/providers/common/images.go | 33 + .../cluster/internal/providers/common/logs.go | 59 + .../internal/providers/common/namer.go | 37 + .../internal/providers/common/proxy.go | 64 + .../cluster/internal/providers/docker/OWNERS | 2 + .../internal/providers/docker/constants.go | 24 + .../internal/providers/docker/images.go | 91 + .../internal/providers/docker/network.go | 329 +++ .../cluster/internal/providers/docker/node.go | 171 ++ .../internal/providers/docker/provider.go | 344 ++++ .../internal/providers/docker/provision.go | 418 ++++ .../cluster/internal/providers/docker/util.go | 100 + .../cluster/internal/providers/podman/OWNERS | 13 + .../internal/providers/podman/constants.go | 24 + .../internal/providers/podman/images.go | 115 ++ .../internal/providers/podman/network.go | 146 ++ .../cluster/internal/providers/podman/node.go | 171 ++ .../internal/providers/podman/provider.go | 442 ++++ .../internal/providers/podman/provision.go | 436 ++++ .../cluster/internal/providers/podman/util.go | 169 ++ .../cluster/internal/providers/provider.go | 59 + .../sigs.k8s.io/kind/pkg/cluster/nodes/doc.go | 18 + .../kind/pkg/cluster/nodes/types.go | 40 + .../kind/pkg/cluster/nodeutils/doc.go | 19 + .../kind/pkg/cluster/nodeutils/roles.go | 153 ++ .../kind/pkg/cluster/nodeutils/util.go | 156 ++ .../sigs.k8s.io/kind/pkg/cluster/provider.go | 246 +++ vendor/sigs.k8s.io/kind/pkg/cmd/doc.go | 15 + vendor/sigs.k8s.io/kind/pkg/cmd/iostreams.go | 41 + .../kind/pkg/cmd/kind/version/version.go | 97 + vendor/sigs.k8s.io/kind/pkg/cmd/logger.go | 47 + .../sigs.k8s.io/kind/pkg/errors/aggregate.go | 49 + .../kind/pkg/errors/aggregate_forked.go | 167 ++ .../sigs.k8s.io/kind/pkg/errors/concurrent.go | 69 + vendor/sigs.k8s.io/kind/pkg/errors/doc.go | 18 + vendor/sigs.k8s.io/kind/pkg/errors/errors.go | 94 + vendor/sigs.k8s.io/kind/pkg/exec/default.go | 36 + vendor/sigs.k8s.io/kind/pkg/exec/doc.go | 20 + vendor/sigs.k8s.io/kind/pkg/exec/helpers.go | 142 ++ vendor/sigs.k8s.io/kind/pkg/exec/local.go | 157 ++ vendor/sigs.k8s.io/kind/pkg/exec/types.go | 70 + vendor/sigs.k8s.io/kind/pkg/fs/fs.go | 156 ++ .../pkg/internal/apis/config/cluster_util.go | 34 + .../internal/apis/config/convert_v1alpha4.go | 104 + .../kind/pkg/internal/apis/config/default.go | 104 + .../kind/pkg/internal/apis/config/doc.go | 23 + .../internal/apis/config/encoding/convert.go | 29 + .../pkg/internal/apis/config/encoding/doc.go | 18 + .../pkg/internal/apis/config/encoding/load.go | 93 + .../kind/pkg/internal/apis/config/types.go | 275 +++ .../kind/pkg/internal/apis/config/validate.go | 271 +++ .../apis/config/zz_generated.deepcopy.go | 196 ++ .../kind/pkg/internal/cli/logger.go | 255 +++ .../kind/pkg/internal/cli/override.go | 18 + .../kind/pkg/internal/cli/spinner.go | 168 ++ 
.../kind/pkg/internal/cli/status.go | 90 + .../sigs.k8s.io/kind/pkg/internal/env/term.go | 107 + .../kind/pkg/internal/patch/doc.go | 18 + .../kind/pkg/internal/patch/json6902patch.go | 53 + .../kind/pkg/internal/patch/kubeyaml.go | 79 + .../kind/pkg/internal/patch/matchinfo.go | 53 + .../kind/pkg/internal/patch/mergepatch.go | 58 + .../kind/pkg/internal/patch/resource.go | 148 ++ .../kind/pkg/internal/patch/toml.go | 100 + .../sigs.k8s.io/kind/pkg/internal/sets/doc.go | 25 + .../kind/pkg/internal/sets/empty.go | 23 + .../kind/pkg/internal/sets/string.go | 205 ++ .../kind/pkg/internal/version/doc.go | 22 + .../kind/pkg/internal/version/version.go | 325 +++ vendor/sigs.k8s.io/kind/pkg/log/doc.go | 19 + vendor/sigs.k8s.io/kind/pkg/log/noop.go | 47 + vendor/sigs.k8s.io/kind/pkg/log/types.go | 66 + 221 files changed, 32496 insertions(+), 7 deletions(-) create mode 100644 vendor/github.com/BurntSushi/toml/.gitignore create mode 100644 vendor/github.com/BurntSushi/toml/COMPATIBLE create mode 100644 vendor/github.com/BurntSushi/toml/COPYING create mode 100644 vendor/github.com/BurntSushi/toml/README.md create mode 100644 vendor/github.com/BurntSushi/toml/decode.go create mode 100644 vendor/github.com/BurntSushi/toml/decode_go116.go create mode 100644 vendor/github.com/BurntSushi/toml/deprecated.go create mode 100644 vendor/github.com/BurntSushi/toml/doc.go create mode 100644 vendor/github.com/BurntSushi/toml/encode.go create mode 100644 vendor/github.com/BurntSushi/toml/error.go create mode 100644 vendor/github.com/BurntSushi/toml/internal/tz.go create mode 100644 vendor/github.com/BurntSushi/toml/lex.go create mode 100644 vendor/github.com/BurntSushi/toml/meta.go create mode 100644 vendor/github.com/BurntSushi/toml/parse.go create mode 100644 vendor/github.com/BurntSushi/toml/type_fields.go create mode 100644 vendor/github.com/BurntSushi/toml/type_toml.go create mode 100644 vendor/github.com/alessio/shellescape/.gitignore create mode 100644 vendor/github.com/alessio/shellescape/.golangci.yml create mode 100644 vendor/github.com/alessio/shellescape/.goreleaser.yml create mode 100644 vendor/github.com/alessio/shellescape/AUTHORS create mode 100644 vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/alessio/shellescape/LICENSE create mode 100644 vendor/github.com/alessio/shellescape/README.md create mode 100644 vendor/github.com/alessio/shellescape/shellescape.go create mode 100644 vendor/github.com/google/safetext/LICENSE create mode 100644 vendor/github.com/google/safetext/common/common.go create mode 100644 vendor/github.com/google/safetext/yamltemplate/yamltemplate.go create mode 100644 vendor/github.com/inconshreveable/mousetrap/LICENSE create mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_others.go create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows.go create mode 100644 vendor/github.com/mattn/go-isatty/LICENSE create mode 100644 vendor/github.com/mattn/go-isatty/README.md create mode 100644 vendor/github.com/mattn/go-isatty/doc.go create mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_plan9.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_tcgets.go create 
mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go create mode 100644 vendor/github.com/pelletier/go-toml/.dockerignore create mode 100644 vendor/github.com/pelletier/go-toml/.gitignore create mode 100644 vendor/github.com/pelletier/go-toml/CONTRIBUTING.md create mode 100644 vendor/github.com/pelletier/go-toml/Dockerfile create mode 100644 vendor/github.com/pelletier/go-toml/LICENSE create mode 100644 vendor/github.com/pelletier/go-toml/Makefile create mode 100644 vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md create mode 100644 vendor/github.com/pelletier/go-toml/README.md create mode 100644 vendor/github.com/pelletier/go-toml/SECURITY.md create mode 100644 vendor/github.com/pelletier/go-toml/azure-pipelines.yml create mode 100644 vendor/github.com/pelletier/go-toml/benchmark.sh create mode 100644 vendor/github.com/pelletier/go-toml/doc.go create mode 100644 vendor/github.com/pelletier/go-toml/example-crlf.toml create mode 100644 vendor/github.com/pelletier/go-toml/example.toml create mode 100644 vendor/github.com/pelletier/go-toml/fuzz.go create mode 100644 vendor/github.com/pelletier/go-toml/fuzz.sh create mode 100644 vendor/github.com/pelletier/go-toml/keysparsing.go create mode 100644 vendor/github.com/pelletier/go-toml/lexer.go create mode 100644 vendor/github.com/pelletier/go-toml/localtime.go create mode 100644 vendor/github.com/pelletier/go-toml/marshal.go create mode 100644 vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml create mode 100644 vendor/github.com/pelletier/go-toml/marshal_test.toml create mode 100644 vendor/github.com/pelletier/go-toml/parser.go create mode 100644 vendor/github.com/pelletier/go-toml/position.go create mode 100644 vendor/github.com/pelletier/go-toml/token.go create mode 100644 vendor/github.com/pelletier/go-toml/toml.go create mode 100644 vendor/github.com/pelletier/go-toml/tomlpub.go create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_create.go create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_write.go create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_writepub.go create mode 100644 vendor/github.com/spf13/cobra/.gitignore create mode 100644 vendor/github.com/spf13/cobra/.golangci.yml create mode 100644 vendor/github.com/spf13/cobra/.mailmap create mode 100644 vendor/github.com/spf13/cobra/CONDUCT.md create mode 100644 vendor/github.com/spf13/cobra/CONTRIBUTING.md create mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt create mode 100644 vendor/github.com/spf13/cobra/MAINTAINERS create mode 100644 vendor/github.com/spf13/cobra/Makefile create mode 100644 vendor/github.com/spf13/cobra/README.md create mode 100644 vendor/github.com/spf13/cobra/active_help.go create mode 100644 vendor/github.com/spf13/cobra/active_help.md create mode 100644 vendor/github.com/spf13/cobra/args.go create mode 100644 vendor/github.com/spf13/cobra/bash_completions.go create mode 100644 vendor/github.com/spf13/cobra/bash_completions.md create mode 100644 vendor/github.com/spf13/cobra/bash_completionsV2.go create mode 100644 vendor/github.com/spf13/cobra/cobra.go create mode 100644 vendor/github.com/spf13/cobra/command.go create mode 100644 vendor/github.com/spf13/cobra/command_notwin.go create mode 100644 vendor/github.com/spf13/cobra/command_win.go create mode 100644 vendor/github.com/spf13/cobra/completions.go create mode 100644 vendor/github.com/spf13/cobra/fish_completions.go create mode 100644 vendor/github.com/spf13/cobra/fish_completions.md create mode 100644 
vendor/github.com/spf13/cobra/flag_groups.go create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.go create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.md create mode 100644 vendor/github.com/spf13/cobra/projects_using_cobra.md create mode 100644 vendor/github.com/spf13/cobra/shell_completions.go create mode 100644 vendor/github.com/spf13/cobra/shell_completions.md create mode 100644 vendor/github.com/spf13/cobra/user_guide.md create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.go create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.md create mode 100644 vendor/sigs.k8s.io/kind/LICENSE create mode 100644 vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/default.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/yaml.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/zz_generated.deepcopy.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/constants/constants.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/createoption.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/action.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config/config.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni/cni.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage/storage.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit/init.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin/join.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer/loadbalancer.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready/waitforready.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/create.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/delete/delete.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/const.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/encode.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/helpers.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/lock.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/merge.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/paths.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/read.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/remove.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/types.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/write.go create mode 100644 
vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/kubeconfig.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/config.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/const.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/logs.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/cgroups.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/constants.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/getport.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/images.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/logs.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/namer.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/proxy.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/OWNERS create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/constants.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/images.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/network.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/node.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provider.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/util.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/OWNERS create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/constants.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/images.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/network.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/node.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provider.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/util.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/provider.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/nodes/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/nodes/types.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/roles.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/util.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cluster/provider.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cmd/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cmd/iostreams.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/cmd/logger.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/errors/aggregate.go create mode 100644 
vendor/sigs.k8s.io/kind/pkg/errors/aggregate_forked.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/errors/concurrent.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/errors/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/errors/errors.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/exec/default.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/exec/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/exec/helpers.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/exec/local.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/exec/types.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/fs/fs.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/apis/config/cluster_util.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/apis/config/convert_v1alpha4.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/apis/config/default.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/apis/config/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/convert.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/load.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/apis/config/zz_generated.deepcopy.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/cli/logger.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/cli/override.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/cli/spinner.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/cli/status.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/env/term.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/patch/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/patch/json6902patch.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/patch/kubeyaml.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/patch/matchinfo.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/patch/mergepatch.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/patch/resource.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/patch/toml.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/sets/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/sets/empty.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/sets/string.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/version/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/internal/version/version.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/log/doc.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/log/noop.go create mode 100644 vendor/sigs.k8s.io/kind/pkg/log/types.go diff --git a/Makefile b/Makefile index dec253006..20be8f817 100644 --- a/Makefile +++ b/Makefile @@ -322,13 +322,13 @@ KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env --bin-dir $(abspath .PHONY: test-unit test-unit: $(SETUP_ENVTEST) $(GOTESTSUM) $(HELM) ## Run unit @mkdir -p $(shell pwd)/.coverage - KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" $(GOTESTSUM) --junitfile=.coverage/junit.xml --format testname -- -mod=vendor \ + CREATE_KIND_CLUSTER=true KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" $(GOTESTSUM) --junitfile=.coverage/junit.xml --format testname -- -mod=vendor \ -covermode=atomic -coverprofile=.coverage/cover.out -p=4 ./internal/controller/... 
.PHONY: test-integration-github test-integration-github: $(SETUP_ENVTEST) $(GOTESTSUM) @mkdir -p $(shell pwd)/.coverage - KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" $(GOTESTSUM) --junitfile=../.coverage/junit.xml --format testname -- -mod=vendor \ + CREATE_KIND_CLUSTER=false KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" $(GOTESTSUM) --junitfile=../.coverage/junit.xml --format testname -- -mod=vendor \ -covermode=atomic -coverprofile=../.coverage/cover.out -p=1 ./internal/test/integration/github/... ##@ Verify diff --git a/go.mod b/go.mod index d0faf40f8..27556e4a7 100644 --- a/go.mod +++ b/go.mod @@ -19,12 +19,20 @@ require ( sigs.k8s.io/cluster-api v1.5.2 sigs.k8s.io/cluster-api/test v1.5.2 sigs.k8s.io/controller-runtime v0.15.1 + sigs.k8s.io/kind v0.20.0 ) require ( + github.com/BurntSushi/toml v1.0.0 // indirect + github.com/alessio/shellescape v1.4.1 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/spf13/cobra v1.7.0 // indirect github.com/stretchr/objx v0.5.1 // indirect k8s.io/cluster-bootstrap v0.27.2 // indirect ) diff --git a/go.sum b/go.sum index 4feaf5ffb..75481fd45 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= @@ -8,6 +10,8 @@ github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= +github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= @@ -37,6 +41,8 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coredns/caddy v1.1.0 h1:ezvsPrT/tA/7pYDBZxu0cT0VmWk75AfIaf6GSYCNMf0= github.com/coredns/corefile-migration v1.0.21 h1:W/DCETrHDiFo0Wj03EyMkaQ9fwsmSgqTCQDHpceaSsE= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod 
h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -119,6 +125,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= +github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -129,6 +137,9 @@ github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -146,6 +157,9 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= @@ -160,11 +174,15 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -182,10 +200,14 @@ github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPH github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= @@ -281,7 +303,9 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -348,6 +372,7 @@ 
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -395,6 +420,8 @@ sigs.k8s.io/controller-runtime v0.15.1 h1:9UvgKD4ZJGcj24vefUFgZFP3xej/3igL9BsOUT sigs.k8s.io/controller-runtime v0.15.1/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kind v0.20.0 h1:f0sc3v9mQbGnjBUaqSFST1dwIuiikKVGgoTwpoP33a8= +sigs.k8s.io/kind v0.20.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs= sigs.k8s.io/kustomize/api v0.13.2 h1:kejWfLeJhUsTGioDoFNJET5LQe/ajzXhJGYoU+pJsiA= sigs.k8s.io/kustomize/api v0.13.2/go.mod h1:DUp325VVMFVcQSq+ZxyDisA8wtldwHxLZbr1g94UHsw= sigs.k8s.io/kustomize/kyaml v0.14.1 h1:c8iibius7l24G2wVAGZn/Va2wNys03GXLjYVIcFVxKA= diff --git a/internal/test/helpers/envtest.go b/internal/test/helpers/envtest.go index ef0e0cac5..31a3faa01 100644 --- a/internal/test/helpers/envtest.go +++ b/internal/test/helpers/envtest.go @@ -25,6 +25,9 @@ import ( "path" "path/filepath" goruntime "runtime" + "strconv" + "strings" + "time" csov1alpha1 "github.com/SovereignCloudStack/cluster-stack-operator/api/v1alpha1" "github.com/SovereignCloudStack/cluster-stack-operator/internal/test/helpers/builder" @@ -39,8 +42,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" "k8s.io/klog/v2/klogr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -50,6 +55,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + kind "sigs.k8s.io/kind/pkg/cluster" ) func init() { @@ -76,6 +82,9 @@ const ( // DefaultPodNamespace is default the namespace for the envtest resources. DefaultPodNamespace = "cso-system" + + // defaultKindClusterNodeImage is default node image for kind cluster. 
+ defaultKindClusterNodeImage = "kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb" //#nosec ) const ( @@ -130,11 +139,13 @@ type ( TestEnvironment struct { ctrl.Manager client.Client - KubeConfig string - Config *rest.Config - cancel context.CancelFunc - GitHubClientFactory githubclient.Factory - GitHubClient *githubmocks.Client + KubeConfig string + Config *rest.Config + cancel context.CancelFunc + kind *kind.Provider + WorkloadClusterClient *kubernetes.Clientset + GitHubClientFactory githubclient.Factory + GitHubClient *githubmocks.Client } ) @@ -177,6 +188,45 @@ func NewTestEnvironment() *TestEnvironment { GitHubClient: githubClient, } + if ifCreateKind() { + // Create kind cluster + klog.Info("creating kind cluster") + + cluster := kind.NewProvider(kind.ProviderWithDocker()) + err = cluster.Create( + DefaultKindClusterName, + kind.CreateWithWaitForReady(time.Minute*2), + kind.CreateWithNodeImage(defaultKindClusterNodeImage), + ) + if err != nil { + klog.Fatalf("unable to create kind cluster: %s", err) + } + klog.Infof("kind cluster created: %s", DefaultKindClusterName) + + // Get kind cluster kubeconfig + testEnv.KubeConfig, err = cluster.KubeConfig(DefaultKindClusterName, false) + if err != nil { + klog.Fatalf("unable to get kubeconfig: %s", err) + } + + testEnv.kind = cluster + + clientCfg, err := clientcmd.NewClientConfigFromBytes([]byte(testEnv.KubeConfig)) + if err != nil { + klog.Fatalf("failed to create client config from kubeconfig: %v", err) + } + + restConfig, err := clientCfg.ClientConfig() + if err != nil { + klog.Fatalf("failed to get rest config from client config: %v", err) + } + + testEnv.WorkloadClusterClient, err = kubernetes.NewForConfig(restConfig) + if err != nil { + klog.Fatalf("failed to get client set from rest config: %v", err) + } + } + return testEnv } @@ -192,6 +242,17 @@ func (t *TestEnvironment) StartManager(ctx context.Context) error { // Stop stops the manager and cancels the context. func (t *TestEnvironment) Stop() error { + if ifCreateKind() { + klog.Info("Deleting kind cluster") + err := t.kind.Delete(DefaultKindClusterName, "./kubeconfig") + if err != nil && !strings.Contains(err.Error(), "failed to update kubeconfig:") { + klog.Errorf("unable to delete kind cluster: %s", err) + } else { + klog.Info("successfully deleted kind cluster") + } + } + t.cancel() if err := env.Stop(); err != nil { return fmt.Errorf("failed to stop environment; %w", err) @@ -306,3 +367,18 @@ func envOr(envKey, defaultValue string) string { } return defaultValue } + +// ifCreateKind reports whether a kind cluster should be created, based on the CREATE_KIND_CLUSTER environment variable (defaults to true). +func ifCreateKind() bool { + createKind, ok := os.LookupEnv("CREATE_KIND_CLUSTER") + // default to true if not set + if createKind == "" || !ok { + createKind = "true" + } + ifCreate, err := strconv.ParseBool(createKind) + if err != nil { + klog.Fatalf("unable to parse CREATE_KIND_CLUSTER value: %s", err) + } + + return ifCreate +} diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 000000000..cd11be965 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,2 @@ +toml.test +/toml-test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE new file mode 100644 index 000000000..f621b0119 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -0,0 +1 @@ +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
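Taken together, the Makefile targets and the envtest.go hunks above let the test environment optionally provision a real kind workload cluster next to the envtest control plane, toggled by CREATE_KIND_CLUSTER. A minimal sketch of how a test might consume this, assuming plain `go test`; NewTestEnvironment, StartManager, Stop, WorkloadClusterClient, and CREATE_KIND_CLUSTER come from this patch, while the package name, variable wiring, and test body are illustrative assumptions only:

```go
package integration_test // hypothetical package; real suites live under internal/test/...

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	helpers "github.com/SovereignCloudStack/cluster-stack-operator/internal/test/helpers"
)

// testEnv would be created once per suite (e.g. in TestMain or a Ginkgo
// BeforeSuite), started via StartManager, and torn down with Stop, which
// also deletes the kind cluster again.
var testEnv *helpers.TestEnvironment

func TestWorkloadClusterIsReachable(t *testing.T) {
	if testEnv == nil || testEnv.WorkloadClusterClient == nil {
		// With CREATE_KIND_CLUSTER=false (as the test-integration-github
		// target sets), no kind cluster is created and
		// WorkloadClusterClient stays nil.
		t.Skip("no kind workload cluster available")
	}

	// WorkloadClusterClient targets the separate kind cluster,
	// not the envtest control plane.
	nodes, err := testEnv.WorkloadClusterClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		t.Fatalf("listing nodes of the kind cluster: %v", err)
	}
	if len(nodes.Items) == 0 {
		t.Fatal("expected at least one node in the kind cluster")
	}
}
```

This mirrors the Makefile wiring above: test-unit exports CREATE_KIND_CLUSTER=true so controller tests get a real workload cluster, while test-integration-github sets it to false and skips kind entirely.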
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 000000000..01b574320 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 000000000..cc13f8667 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,211 @@ +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. + +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). + +Documentation: https://godocs.io/github.com/BurntSushi/toml + +See the [releases page](https://github.com/BurntSushi/toml/releases) for a +changelog; this information is also in the git tag annotations (e.g. `git show +v0.4.0`). + +This library requires Go 1.13 or newer; install it with: + + % go get github.com/BurntSushi/toml@latest + +It also comes with a TOML validator CLI tool: + + % go install github.com/BurntSushi/toml/cmd/tomlv@latest + % tomlv some-toml-file.toml + +### Testing +This package passes all tests in [toml-test] for both the decoder and the +encoder. + +[toml-test]: https://github.com/BurntSushi/toml-test + +### Examples +This package works similar to how the Go standard library handles XML and JSON. +Namely, data is loaded into Go values via reflection. + +For the simplest example, consider some TOML file as just a list of keys and +values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +err := toml.Decode(tomlData, &conf) +// handle error +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +Beware that like other most other decoders **only exported fields** are +considered when encoding and decoding; private fields are silently ignored. 
+ +### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + +```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +To target TOML specifically you can implement `UnmarshalTOML` TOML interface in +a similar way. + +### More complex usage +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_example/example.{go,toml}`. diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 000000000..e24f0c5d5 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,560 @@ +package toml + +import ( + "encoding" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "reflect" + "strings" +) + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// +// This type can be used for any value, which will cause decoding to be delayed. +// You can use the PrimitiveDecode() function to "manually" decode these values. 
+// +// NOTE: The underlying representation of a `Primitive` value is subject to +// change. Do not rely on it. +// +// NOTE: Primitive values are still parsed, so using them will only avoid the +// overhead of reflection. They can be useful when you don't know the exact type +// of TOML data until runtime. +type Primitive struct { + undecoded interface{} + context Key +} + +// The significand precision for float32 and float64 is 24 and 53 bits; this is +// the range a natural number can be stored in a float without loss of data. +const ( + maxSafeFloat32Int = 16777215 // 2^24-1 + maxSafeFloat64Int = 9007199254740991 // 2^53-1 +) + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decoder decodes TOML data. +// +// TOML tables correspond to Go structs or maps (dealer's choice – they can be +// used interchangeably). +// +// TOML table arrays correspond to either a slice of structs or a slice of maps. +// +// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed +// in the local timezone. +// +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. +// +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. See the Unmarshaler example for a demonstration with +// time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there may +// exist TOML values that cannot be placed into your representation, and there +// may be parts of your representation that do not correspond to TOML values. +// This loose mapping can be made stricter by using the IsDefined and/or +// Undecoded methods on the MetaData returned. +// +// This decoder does not handle cyclic types. Decode will not terminate if a +// cyclic type is passed. +type Decoder struct { + r io.Reader +} + +// NewDecoder creates a new Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +var ( + unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Decode TOML data in to the pointer `v`. 
+func (dec *Decoder) Decode(v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + s := "%q" + if reflect.TypeOf(v) == nil { + s = "%v" + } + + return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v)) + } + + // Check if this is a supported type: struct, map, interface{}, or something + // that implements UnmarshalTOML or UnmarshalText. + rv = indirect(rv) + rt := rv.Type() + if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && + !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && + !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { + return MetaData{}, e("cannot decode to type %s", rt) + } + + // TODO: parser should read from io.Reader? Or at the very least, make it + // read from []byte rather than string + data, err := ioutil.ReadAll(dec.r) + if err != nil { + return MetaData{}, err + } + + p, err := parse(string(data)) + if err != nil { + return MetaData{}, err + } + + md := MetaData{ + mapping: p.mapping, + types: p.types, + keys: p.ordered, + decoded: make(map[string]struct{}, len(p.ordered)), + context: nil, + } + return md, md.unify(p.mapping, rv) +} + +// Decode the TOML data in to the pointer v. +// +// See the documentation on Decoder for a description of the decoding process. +func Decode(data string, v interface{}) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at path and decode it for you. +func DecodeFile(path string, v interface{}) (MetaData, error) { + fp, err := os.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + // Special case. Look for a `Primitive` value. + // TODO: #76 would make this superfluous after implemented. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // TODO: + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or + // array. In particular, the unmarshaler should only be applied to primitive + // TOML values. But at this point, it will be applied to all kinds of values + // and produce an incorrect error whenever those values are hashes or arrays + // (including arrays of tables). 
+ + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32, reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = struct{}{} + md.context = append(md.context, key) + err := md.unify(datum, subv) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + return e("cannot write unexported field %s.%s", rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + if k := rv.Type().Key().Kind(); k != reflect.String { + return fmt.Errorf( + "toml: cannot decode to a map with non-string key type (%s in %q)", + k, rv.Type()) + } + + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return md.badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = struct{}{} + md.context = append(md.context, k) + + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + if l := datav.Len(); l != rv.Len() { + return e("expected array length %d; got TOML array of length %d", rv.Len(), l) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, 
rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return md.badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + if num < -math.MaxFloat32 || num > math.MaxFloat32 { + return e("value %f is out of range for float32", num) + } + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + + if num, ok := data.(int64); ok { + switch rv.Kind() { + case reflect.Float32: + if num < -maxSafeFloat32Int || num > maxSafeFloat32Int { + return e("value %d is out of range for float32", num) + } + fallthrough + case reflect.Float64: + if num < -maxSafeFloat64Int || num > maxSafeFloat64Int { + return e("value %d is out of range for float64", num) + } + rv.SetFloat(float64(num)) + default: + panic("bug") + } + return nil + } + + return md.badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+ case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return md.badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return md.badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case Marshaler: + text, err := sdata.MarshalTOML() + if err != nil { + return err + } + s = string(text) + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return md.badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +func (md *MetaData) badtype(dst string, data interface{}) error { + return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst) +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// +// Pointers are followed until the value is not a pointer. New values are +// allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of interest +// to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return true + } + return false +} + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go new file mode 100644 index 000000000..eddfb641b --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_go116.go @@ -0,0 +1,19 @@ +//go:build go1.16 +// +build go1.16 + +package toml + +import ( + "io/fs" +) + +// DecodeFS is just like Decode, except it will automatically read the contents +// of the file at `path` from a fs.FS instance. 
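
A sketch of DecodeFS with an embedded filesystem (requires Go 1.16+; the file name and destination type are hypothetical):

	//go:embed config.toml
	var configFS embed.FS

	func loadConfig() (map[string]interface{}, error) {
		var conf map[string]interface{}
		_, err := toml.DecodeFS(configFS, "config.toml", &conf)
		return conf, err
	}
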
+func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
+	fp, err := fsys.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
new file mode 100644
index 000000000..c6af3f239
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -0,0 +1,21 @@
+package toml
+
+import (
+	"encoding"
+	"io"
+)
+
+// Deprecated: use encoding.TextMarshaler
+type TextMarshaler encoding.TextMarshaler
+
+// Deprecated: use encoding.TextUnmarshaler
+type TextUnmarshaler encoding.TextUnmarshaler
+
+// Deprecated: use MetaData.PrimitiveDecode.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md := MetaData{decoded: make(map[string]struct{})}
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Deprecated: use NewDecoder(reader).Decode(&value).
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
new file mode 100644
index 000000000..099c4a77d
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -0,0 +1,13 @@
+/*
+Package toml implements decoding and encoding of TOML files.
+
+This package supports TOML v1.0.0, as listed on https://toml.io
+
+There is also support for delaying decoding with the Primitive type, and
+querying the set of keys in a TOML document with the MetaData type.
+
+The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+and can be used to verify whether a TOML document is valid. It can also be
+used to print the type of each key.
+*/
+package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
new file mode 100644
index 000000000..dee4e6d31
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -0,0 +1,694 @@
+package toml
+
+import (
+	"bufio"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/BurntSushi/toml/internal"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+	errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+	errNonString       = errors.New("toml: cannot encode a map with non-string key type")
+	errNoKey           = errors.New("toml: top-level values must be Go maps or structs")
+	errAnything        = errors.New("") // used in testing
+)
+
+var dblQuotedReplacer = strings.NewReplacer(
+	"\"", "\\\"",
+	"\\", "\\\\",
+	"\x00", `\u0000`,
+	"\x01", `\u0001`,
+	"\x02", `\u0002`,
+	"\x03", `\u0003`,
+	"\x04", `\u0004`,
+	"\x05", `\u0005`,
+	"\x06", `\u0006`,
+	"\x07", `\u0007`,
+	"\b", `\b`,
+	"\t", `\t`,
+	"\n", `\n`,
+	"\x0b", `\u000b`,
+	"\f", `\f`,
+	"\r", `\r`,
+	"\x0e", `\u000e`,
+	"\x0f", `\u000f`,
+	"\x10", `\u0010`,
+	"\x11", `\u0011`,
+	"\x12", `\u0012`,
+	"\x13", `\u0013`,
+	"\x14", `\u0014`,
+	"\x15", `\u0015`,
+	"\x16", `\u0016`,
+	"\x17", `\u0017`,
+	"\x18", `\u0018`,
+	"\x19", `\u0019`,
+	"\x1a", `\u001a`,
+	"\x1b", `\u001b`,
+	"\x1c", `\u001c`,
+	"\x1d", `\u001d`,
+	"\x1e", `\u001e`,
+	"\x1f", `\u001f`,
+	"\x7f", `\u007f`,
+)
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid TOML.
+type Marshaler interface {
+	MarshalTOML() ([]byte, error)
+}
+
+// Encoder encodes a Go value to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same as
+// for the Decode* functions.
+//
+// The toml.Marshaler and encoding.TextMarshaler interfaces are supported to
+// encode the value as custom TOML.
+//
+// If you want to write arbitrary binary data then you will need to use
+// something like base64 since TOML does not have any binary types.
+//
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
+//
+// Encoding Go values without a corresponding TOML representation will return an
+// error. Examples of this include maps with non-string keys, slices with nil
+// elements, embedded non-struct types, and nested slices containing maps or
+// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
+// is okay, as is []map[string][]string).
+//
+// NOTE: only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
+type Encoder struct {
+	// String to use for a single indentation level; default is two spaces.
+	Indent string
+
+	w          *bufio.Writer
+	hasWritten bool // written any output to w yet?
+}
+
+// NewEncoder creates a new Encoder.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:      bufio.NewWriter(w),
+		Indent: "  ",
+	}
+}
+
+// Encode writes a TOML representation of the Go value to the Encoder's writer.
+//
+// An error is returned if the value given cannot be encoded to a valid TOML
+// document.
+func (enc *Encoder) Encode(v interface{}) error {
+	rv := eindirect(reflect.ValueOf(v))
+	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+		return err
+	}
+	return enc.w.Flush()
+}
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if terr, ok := r.(tomlEncodeError); ok {
+				err = terr.error
+				return
+			}
+			panic(r)
+		}
+	}()
+	enc.encode(key, rv)
+	return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+	// Special case: time needs to be in ISO8601 format.
+	//
+	// Special case: if we can marshal the type to text, then we use that. This
+	// prevents the encoder from handling these types as generic structs (or
+	// whatever the underlying type of a TextMarshaler is).
+	switch t := rv.Interface().(type) {
+	case time.Time, encoding.TextMarshaler, Marshaler:
+		enc.writeKeyValue(key, rv, false)
+		return
+	// TODO: #76 would make this superfluous after implemented.
+	case Primitive:
+		enc.encode(key, reflect.ValueOf(t.undecoded))
+		return
+	}
+
+	k := rv.Kind()
+	switch k {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64,
+		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+		enc.writeKeyValue(key, rv, false)
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+			enc.eArrayOfTables(key, rv)
+		} else {
+			enc.writeKeyValue(key, rv, false)
+		}
+	case reflect.Interface:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Map:
+		if rv.IsNil() {
+			return
+		}
+		enc.eTable(key, rv)
+	case reflect.Ptr:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Struct:
+		enc.eTable(key, rv)
+	default:
+		encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
+	}
+}
+
+// eElement encodes any value that can be an array element.
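
A usage sketch for the Encoder documented above, combining it with the struct tag options parsed later in this file (the server type and values are hypothetical):

	type server struct {
		Host string   `toml:"host"`
		Port int      `toml:"port,omitzero"`  // omitted when 0
		Tags []string `toml:"tags,omitempty"` // omitted when empty
		Note string   `toml:"-"`              // never encoded
	}

	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf)
	err := enc.Encode(server{Host: "localhost", Port: 8080})
	// buf now holds:
	//	host = "localhost"
	//	port = 8080
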
+func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: // Using TextMarshaler adds extra quotes, which we don't want. + format := time.RFC3339Nano + switch v.Location() { + case internal.LocalDatetime: + format = "2006-01-02T15:04:05.999999999" + case internal.LocalDate: + format = "2006-01-02" + case internal.LocalTime: + format = "15:04:05.999999999" + } + switch v.Location() { + default: + enc.wf(v.Format(format)) + case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: + enc.wf(v.In(time.UTC).Format(format)) + } + return + case Marshaler: + s, err := v.MarshalTOML() + if err != nil { + encPanic(err) + } + enc.writeQuoted(string(s)) + return + case encoding.TextMarshaler: + s, err := v.MarshalText() + if err != nil { + encPanic(err) + } + enc.writeQuoted(string(s)) + return + } + + switch rv.Kind() { + case reflect.String: + enc.writeQuoted(rv.String()) + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } + case reflect.Float64: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) + case reflect.Interface: + enc.eElement(rv.Elem()) + default: + encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. +func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key) + enc.newline() + enc.eMapOrStruct(key, trv, false) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) 
+ enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key) + enc.newline() + } + enc.eMapOrStruct(key, rv, false) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv, inline) + case reflect.Struct: + enc.eStruct(key, rv, inline) + default: + // Should never happen? + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string, trailC bool) { + sort.Strings(mapKeys) + for i, mapKey := range mapKeys { + val := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(val) { + continue + } + + if inline { + enc.writeKeyValue(Key{mapKey}, val, true) + if trailC || i != len(mapKeys)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(mapKey), val) + } + } + } + + if inline { + enc.wf("{") + } + writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) + writeMapKeys(mapKeysSub, false) + if inline { + enc.wf("}") + } +} + +const is32Bit = (32 << (^uint(0) >> 63)) == 32 + +func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table then all keys under it will be in that + // table (not the one we're writing here). + // + // Fields is a [][]int: for fieldsDirect this always has one entry (the + // struct index). For fieldsSub it contains two entries: the parent field + // index from tv, and the field indexes for the fields of the sub. + var ( + rt = rv.Type() + fieldsDirect, fieldsSub [][]int + addFields func(rt reflect.Type, rv reflect.Value, start []int) + ) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields. + continue + } + + frv := rv.Field(i) + + // Treat anonymous struct fields with tag names as though they are + // not anonymous, like encoding/json does. + // + // Non-struct anonymous fields use the normal encoding logic. + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + if getOptions(f.Tag).name == "" { + addFields(t, frv, append(start, f.Index...)) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), append(start, f.Index...)) + } + continue + } + } + } + + if typeIsTable(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + // Copy so it works correct on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. 
+ if is32Bit { + copyStart := make([]int, len(start)) + copy(copyStart, start) + fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + } + addFields(rt, rv, nil) + + writeFields := func(fields [][]int) { + for _, fieldIndex := range fields { + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := rv.FieldByIndex(fieldIndex) + + if isNil(fieldVal) { /// Don't write anything for nil fields. + continue + } + + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + keyName := fieldType.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(fieldVal) { + continue + } + if opts.omitzero && isZero(fieldVal) { + continue + } + + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != len(fields)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } + } + } + + if inline { + enc.wf("{") + } + writeFields(fieldsDirect) + writeFields(fieldsSub) + if inline { + enc.wf("}") + } +} + +// tomlTypeOfGo returns the TOML type name of the Go value's type. +// +// It is used to determine whether the types of array elements are mixed (which +// is forbidden). If the Go value is nil, then it is illegal for it to be an +// array element, and valueIsNil is returned as true. +// +// The type may be `nil`, which means no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if typeEqual(tomlHash, tomlArrayType(rv)) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + case reflect.Struct: + if _, ok := rv.Interface().(time.Time); ok { + return tomlDatetime + } + if isMarshaler(rv) { + return tomlString + } + return tomlHash + default: + if isMarshaler(rv) { + return tomlString + } + + encPanic(errors.New("unsupported type: " + rv.Kind().String())) + panic("unreachable") + } +} + +func isMarshaler(rv reflect.Value) bool { + switch rv.Interface().(type) { + case encoding.TextMarshaler: + return true + case Marshaler: + return true + } + + // Someone used a pointer receiver: we can make it work for pointer values. + if rv.CanAddr() { + if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok { + return true + } + if _, ok := rv.Addr().Interface().(Marshaler); ok { + return true + } + } + return false +} + +// tomlArrayType returns the element type of a TOML array. The type returned +// may be nil if it cannot be determined (e.g., a nil slice or a zero length +// slize). This function may also panic if it finds a type that cannot be +// expressed in TOML (such as nil elements, heterogeneous arrays or directly +// nested arrays of tables). +func tomlArrayType(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { + return nil + } + + /// Don't allow nil. 
+ rvlen := rv.Len() + for i := 1; i < rvlen; i++ { + if tomlTypeOfGo(rv.Index(i)) == nil { + encPanic(errArrayNilElement) + } + } + + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) + } + return firstType +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +// Write a key/value pair: +// +// key = +// +// This is also used for "k = v" in inline tables; so something like this will +// be written in three calls: +// +// ┌────────────────────┐ +// │ ┌───┐ ┌─────┐│ +// v v v v vv +// key = {k = v, k2 = v2} +// +func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { + if len(key) == 0 { + encPanic(errNoKey) + } + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + if !inline { + enc.newline() + } +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + _, err := fmt.Fprintf(enc.w, format, v...) + if err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go new file mode 100644 index 000000000..36edc4655 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -0,0 +1,229 @@ +package toml + +import ( + "fmt" + "strings" +) + +// ParseError is returned when there is an error parsing the TOML syntax. +// +// For example invalid syntax, duplicate keys, etc. +// +// In addition to the error message itself, you can also print detailed location +// information with context by using ErrorWithLocation(): +// +// toml: error: Key 'fruit' was already created and cannot be used as an array. 
+//
+//     At line 4, column 2-7:
+//
+//           2 | fruit = []
+//           3 |
+//           4 | [[fruit]] # Not allowed
+//                 ^^^^^
+//
+// Furthermore, ErrorWithUsage() can be used to print the above with some
+// more detailed usage guidance:
+//
+//     toml: error: newlines not allowed within inline tables
+//
+//     At line 1, column 18:
+//
+//           1 | x = [{ key = 42 #
+//                                ^
+//
+//     Error help:
+//
+//       Inline tables must always be on a single line:
+//
+//           table = {key = 42, second = 43}
+//
+//       It is invalid to split them over multiple lines like so:
+//
+//           # INVALID
+//           table = {
+//               key = 42,
+//               second = 43
+//           }
+//
+//       Use regular tables for this:
+//
+//           [table]
+//           key = 42
+//           second = 43
+type ParseError struct {
+	Message  string   // Short technical message.
+	Usage    string   // Longer message with usage guidance; may be blank.
+	Position Position // Position of the error
+	LastKey  string   // Last parsed key, may be blank.
+	Line     int      // Line the error occurred on. Deprecated: use Position.
+
+	err   error
+	input string
+}
+
+// Position of an error.
+type Position struct {
+	Line  int // Line number, starting at 1.
+	Start int // Start of error, as byte offset starting at 0.
+	Len   int // Length in bytes.
+}
+
+func (pe ParseError) Error() string {
+	msg := pe.Message
+	if msg == "" { // Error from errorf()
+		msg = pe.err.Error()
+	}
+
+	if pe.LastKey == "" {
+		return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+	}
+	return fmt.Sprintf("toml: line %d (last key %q): %s",
+		pe.Position.Line, pe.LastKey, msg)
+}
+
+// ErrorWithPosition() returns the error with detailed location context.
+//
+// See the documentation on ParseError.
+func (pe ParseError) ErrorWithPosition() string {
+	if pe.input == "" { // Should never happen, but just in case.
+		return pe.Error()
+	}
+
+	var (
+		lines = strings.Split(pe.input, "\n")
+		col   = pe.column(lines)
+		b     = new(strings.Builder)
+	)
+
+	msg := pe.Message
+	if msg == "" {
+		msg = pe.err.Error()
+	}
+
+	// TODO: don't show control characters as literals? This may not show up
+	// well everywhere.
+
+	if pe.Position.Len == 1 {
+		fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
+			msg, pe.Position.Line, col+1)
+	} else {
+		fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
+			msg, pe.Position.Line, col, col+pe.Position.Len)
+	}
+	if pe.Position.Line > 2 {
+		fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
+	}
+	if pe.Position.Line > 1 {
+		fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
+	}
+	fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
+	fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
+	return b.String()
+}
+
+// ErrorWithUsage() returns the error with detailed location context and usage
+// guidance.
+//
+// See the documentation on ParseError.
+func (pe ParseError) ErrorWithUsage() string {
+	m := pe.ErrorWithPosition()
+	if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
+		return m + "Error help:\n\n  " +
+			strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n  ") +
+			"\n"
+	}
+	return m
+}
+
+func (pe ParseError) column(lines []string) int {
+	var pos, col int
+	for i := range lines {
+		ll := len(lines[i]) + 1 // +1 for the removed newline
+		if pos+ll >= pe.Position.Start {
+			col = pe.Position.Start - pos
+			if col < 0 { // Should never happen, but just in case.
+				col = 0
+			}
+			break
+		}
+		pos += ll
+	}
+
+	return col
+}
+
+type (
+	errLexControl       struct{ r rune }
+	errLexEscape        struct{ r rune }
+	errLexUTF8          struct{ b byte }
+	errLexInvalidNum    struct{ v string }
+	errLexInvalidDate   struct{ v string }
+	errLexInlineTableNL struct{}
+	errLexStringNL      struct{}
+)
+
+func (e errLexControl) Error() string {
+	return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r)
+}
+func (e errLexControl) Usage() string { return "" }
+
+func (e errLexEscape) Error() string        { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) }
+func (e errLexEscape) Usage() string        { return usageEscape }
+func (e errLexUTF8) Error() string          { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
+func (e errLexUTF8) Usage() string          { return "" }
+func (e errLexInvalidNum) Error() string    { return fmt.Sprintf("invalid number: %q", e.v) }
+func (e errLexInvalidNum) Usage() string    { return "" }
+func (e errLexInvalidDate) Error() string   { return fmt.Sprintf("invalid date: %q", e.v) }
+func (e errLexInvalidDate) Usage() string   { return "" }
+func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
+func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
+func (e errLexStringNL) Error() string      { return "strings cannot contain newlines" }
+func (e errLexStringNL) Usage() string      { return usageStringNewline }
+
+const usageEscape = `
+A '\' inside a "-delimited string is interpreted as an escape character.
+
+The following escape sequences are supported:
+\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
+
+To prevent a '\' from being recognized as an escape character, use either:
+
+- a ' or '''-delimited string; escape characters aren't processed in them; or
+- write two backslashes to get a single backslash: '\\'.
+
+If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
+instead of '\' will usually also work: "C:/Users/martin".
+`
+
+const usageInlineNewline = `
+Inline tables must always be on a single line:
+
+    table = {key = 42, second = 43}
+
+It is invalid to split them over multiple lines like so:
+
+    # INVALID
+    table = {
+        key = 42,
+        second = 43
+    }
+
+Use regular tables for this:
+
+    [table]
+    key = 42
+    second = 43
+`
+
+const usageStringNewline = `
+Strings must always be on a single line, and cannot span more than one line:
+
+    # INVALID
+    string = "Hello,
+    world!"
+
+Instead use """ or ''' to split strings over multiple lines:
+
+    string = """Hello,
+    world!"""
+`
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 000000000..022f15bc2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and is left to the implementation.
+// These default to the current local timezone offset of the computer, but this
+// can be changed by changing these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+// +// There isn't a good way to encode this right now though, and passing this sort +// of information also ties in to various related issues such as string format +// encoding, encoding of comments, etc. +// +// So, for the time being, just put this in internal until we can write a good +// comprehensive API for doing all of this. +// +// The reason they're exported is because they're referred from in e.g. +// internal/tag. +// +// Note that this behaviour is valid according to the TOML spec as the exact +// behaviour is left up to implementations. +var ( + localOffset = func() int { _, o := time.Now().Zone(); return o }() + LocalDatetime = time.FixedZone("datetime-local", localOffset) + LocalDate = time.FixedZone("date-local", localOffset) + LocalTime = time.FixedZone("time-local", localOffset) +) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 000000000..63ef20f47 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,1219 @@ +package toml + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemKeyEnd + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const eof = 0 + +type stateFn func(lx *lexer) stateFn + +func (p Position) String() string { + return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) +} + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to 4 runes. This is necessary because TOML + // contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. + + // A stack of state functions used to maintain context. + // + // The idea is to reuse parts of the state machine in various places. For + // example, values can appear at the top level or within arbitrarily nested + // arrays. The last state on the stack is used after a value has been lexed. + // Similarly for comments. 
+ stack []stateFn +} + +type item struct { + typ itemType + val string + err error + pos Position +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx lexer) getPos() Position { + p := Position{ + Line: lx.line, + Start: lx.start, + Len: lx.pos - lx.start, + } + if p.Len <= 0 { + p.Len = 1 + } + return p +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("BUG in lexer: next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[3] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 4 { + lx.nprev++ + } + + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + if r == utf8.RuneError { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return utf8.RuneError + } + + // Note: don't use peek() here, as this calls next(). + if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) { + lx.errorControlChar(r) + return utf8.RuneError + } + + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called 4 times between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("BUG in lexer: backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[3] + lx.nprev-- + + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// error stops all lexing by emitting an error and returning `nil`. +// +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). 
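
How a lexing error typically surfaces to callers: it becomes a ParseError (error.go above). A sketch, assuming the errors.As pattern applies (the input is hypothetical):

	var v map[string]interface{}
	_, err := toml.Decode("x = [{ key = 42\n", &v) // newline inside an inline table
	var perr toml.ParseError
	if errors.As(err, &perr) {
		fmt.Print(perr.ErrorWithUsage()) // position context plus usage help
	}
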
+func (lx *lexer) error(err error) stateFn { + if lx.atEOF { + return lx.errorPrevLine(err) + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: err} + return nil +} + +// errorfPrevline is like error(), but sets the position to the last column of +// the previous line. +// +// This is so that unexpected EOF or NL errors don't show on a new blank line. +func (lx *lexer) errorPrevLine(err error) stateFn { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorPos is like error(), but allows explicitly setting the position. +func (lx *lexer) errorPos(start, length int, err error) stateFn { + pos := lx.getPos() + pos.Start = start + pos.Len = length + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorf is like error, and creates a new error. +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + if lx.atEOF { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} + return nil + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)} + return nil +} + +func (lx *lexer) errorControlChar(cc rune) stateFn { + return lx.errorPos(lx.pos-1, 1, errLexControl{cc}) +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case '#': + lx.push(lexTop) + return lexCommentStart + case '[': + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == '#': + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf( + "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", + r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. 
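
At the decoding level, the table lexing below corresponds to the following: '[name]' opens a table, '[[name]]' appends an element to an array of tables. A sketch (types hypothetical):

	var v struct {
		Fruit []struct {
			Name string `toml:"name"`
		} `toml:"fruit"`
	}
	_, err := toml.Decode("[[fruit]]\nname = \"apple\"\n\n[[fruit]]\nname = \"pear\"\n", &v)
	// v.Fruit has two elements: "apple" and "pear"
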
+func lexTableStart(lx *lexer) stateFn {
+	if lx.peek() == '[' {
+		lx.next()
+		lx.emit(itemArrayTableStart)
+		lx.push(lexArrayTableEnd)
+	} else {
+		lx.emit(itemTableStart)
+		lx.push(lexTableEnd)
+	}
+	return lexTableNameStart
+}
+
+func lexTableEnd(lx *lexer) stateFn {
+	lx.emit(itemTableEnd)
+	return lexTopEnd
+}
+
+func lexArrayTableEnd(lx *lexer) stateFn {
+	if r := lx.next(); r != ']' {
+		return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r)
+	}
+	lx.emit(itemArrayTableEnd)
+	return lexTopEnd
+}
+
+func lexTableNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == ']' || r == eof:
+		return lx.errorf("unexpected end of table name (table names cannot be empty)")
+	case r == '.':
+		return lx.errorf("unexpected table separator (table names cannot be empty)")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		lx.push(lexTableNameEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexTableNameEnd)
+		return lexBareName
+	}
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexTableNameEnd
+	case r == '.':
+		lx.ignore()
+		return lexTableNameStart
+	case r == ']':
+		return lx.pop()
+	default:
+		return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
+	}
+}
+
+// lexBareName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+	r := lx.next()
+	if isBareKeyChar(r) {
+		return lexBareName
+	}
+	lx.backup()
+	lx.emit(itemText)
+	return lx.pop()
+}
+
+// lexQuotedName lexes one quoted part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case r == '"':
+		lx.ignore() // ignore the '"'
+		return lexString
+	case r == '\'':
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected value")
+	default:
+		return lx.errorf("expected value but found %q instead", r)
+	}
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '=': key name appears blank")
+	case r == '.':
+		return lx.errorf("unexpected '.': keys cannot start with a '.'")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		fallthrough
+	default: // Bare key
+		lx.emit(itemKeyStart)
+		return lexKeyNameStart
+	}
+}
+
+func lexKeyNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '='")
+	case r == '.':
+		return lx.errorf("unexpected '.'")
+	case r == '"' || r == '\'':
+		lx.ignore()
+		lx.push(lexKeyEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexKeyEnd)
+		return lexBareName
+	}
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + case r == eof: + return lx.errorf("unexpected EOF; expected key separator '='") + case r == '.': + lx.ignore() + return lexKeyNameStart + case r == '=': + lx.emit(itemKeyEnd) + return lexSkip(lx, lexValue) + default: + return lx.errorf("expected '.' or '=', but got %q instead", r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case '[': + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case '{': + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case '"': + if lx.accept('"') { + if lx.accept('"') { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case '\'': + if lx.accept('\'') { + if lx.accept('\'') { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == '#': + lx.push(lexArrayValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == ']': + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == '#': + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + return lexArrayValue // move on to the next value + case r == ']': + return lexArrayEnd + default: + return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) + } +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. 
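
The array and inline-table states above accept input like the following; a decoding sketch (destination type hypothetical):

	var v struct {
		Points []map[string]int64 `toml:"points"`
	}
	_, err := toml.Decode("points = [{x = 1, y = 2}, {x = 3, y = 4}]", &v)
	// v.Points == []map[string]int64{{"x": 1, "y": 2}, {"x": 3, "y": 4}}
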
+func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. +func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == '}': + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + return lx.errorf("trailing comma not allowed in inline tables") + } + return lexInlineTableValue + case r == '}': + return lexInlineTableEnd + default: + return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) + } +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf(`unexpected EOF; expected '"'`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == '"': + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineString + case eof: + return lx.errorf(`unexpected EOF; expected '"""'`) + case '\\': + return lexMultilineStringEscape + case '"': + /// Found " → try to read two more "". + if lx.accept('"') { + if lx.accept('"') { + /// Peek ahead: the string can contain " and "", including at the + /// end: """str""""" + /// 6 or more at the end, however, is an error. + if lx.peek() == '"' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), `"""""`) { + return lx.errorf(`unexpected '""""""'`) + } + lx.backup() + lx.backup() + return lexMultilineString + } + + lx.backup() /// backup: don't include the """ in the item. + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() /// Read over ''' again and discard it. 
+ lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineString + } +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + default: + return lexRawString + case r == eof: + return lx.errorf(`unexpected EOF; expected "'"`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\'': + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'''" has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineRawString + case eof: + return lx.errorf(`unexpected EOF; expected "'''"`) + case '\'': + /// Found ' → try to read two more ''. + if lx.accept('\'') { + if lx.accept('\'') { + /// Peek ahead: the string can contain ' and '', including at the + /// end: '''str''''' + /// 6 or more at the end, however, is an error. + if lx.peek() == '\'' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineRawString + } +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.error(errLexEscape{r}) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected four hexadecimal digits after '\u', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected eight hexadecimal digits after '\U', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. 
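
Examples of the numeric and datetime forms these number/date states accept, as a decoding sketch (field names hypothetical):

	var v struct {
		Hex  int64     `toml:"hex"`
		Big  int64     `toml:"big"`
		Pi   float64   `toml:"pi"`
		When time.Time `toml:"when"`
	}
	_, err := toml.Decode(
		"hex = 0xdead_beef\nbig = 1_000_000\npi = 3.14\nwhen = 2023-12-22T10:40:11Z\n",
		&v)
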
+func lexNumberOrDateStart(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case '0':
+		return lexBaseNumberOrDate
+	}
+
+	if !isDigit(r) {
+		// The only way to reach this state is if the value starts
+		// with a digit, so specifically treat anything else as an
+		// error.
+		return lx.errorf("expected a digit but got %q", r)
+	}
+
+	return lexNumberOrDate
+}
+
+// lexNumberOrDate consumes either an integer, float or datetime.
+func lexNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexNumberOrDate
+	}
+	switch r {
+	case '-', ':':
+		return lexDatetime
+	case '_':
+		return lexDecimalNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDatetime consumes a Datetime, to a first approximation.
+// The parser validates that it matches one of the accepted formats.
+func lexDatetime(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexDatetime
+	}
+	switch r {
+	case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+':
+		return lexDatetime
+	}
+
+	lx.backup()
+	lx.emitTrim(itemDatetime)
+	return lx.pop()
+}
+
+// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
+func lexHexInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isHexadecimal(r) {
+		return lexHexInteger
+	}
+	switch r {
+	case '_':
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexOctalInteger consumes an octal integer after seeing the '0o' prefix.
+func lexOctalInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isOctal(r) {
+		return lexOctalInteger
+	}
+	switch r {
+	case '_':
+		return lexOctalInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix.
+func lexBinaryInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isBinary(r) {
+		return lexBinaryInteger
+	}
+	switch r {
+	case '_':
+		return lexBinaryInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDecimalNumber consumes a decimal float or integer.
+func lexDecimalNumber(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexDecimalNumber
+	}
+	switch r {
+	case '.', 'e', 'E':
+		return lexFloat
+	case '_':
+		return lexDecimalNumber
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDecimalNumberStart consumes the first digit of a number beginning with a
+// sign. It assumes the sign has already been consumed. Values which start with
+// a sign are only allowed to be decimal integers or floats.
+//
+// The special "nan" and "inf" values are also recognized.
+func lexDecimalNumberStart(lx *lexer) stateFn {
+	r := lx.next()
+
+	// Special error cases to give users better error messages
+	switch r {
+	case 'i':
+		if !lx.accept('n') || !lx.accept('f') {
+			return lx.errorf("invalid float: '%s'", lx.current())
+		}
+		lx.emit(itemFloat)
+		return lx.pop()
+	case 'n':
+		if !lx.accept('a') || !lx.accept('n') {
+			return lx.errorf("invalid float: '%s'", lx.current())
+		}
+		lx.emit(itemFloat)
+		return lx.pop()
+	case '0':
+		p := lx.peek()
+		switch p {
+		case 'b', 'o', 'x':
+			return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p)
+		}
+	case '.':
+		return lx.errorf("floats must start with a digit, not '.'")
+	}
+
+	if isDigit(r) {
+		return lexDecimalNumber
+	}
+
+	return lx.errorf("expected a digit but got %q", r)
+}
+
+// lexBaseNumberOrDate differentiates between the possible values which
+// start with '0'.
+// It assumes that before reaching this state, the initial '0' has been
+// consumed.
+func lexBaseNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	// Note: All datetimes start with at least two digits, so we don't
+	// handle date characters (':', '-', etc.) here.
+	if isDigit(r) {
+		return lexNumberOrDate
+	}
+	switch r {
+	case '_':
+		// Can only be decimal, because there can't be an underscore
+		// between the '0' and the base designator, and dates can't
+		// contain underscores.
+		return lexDecimalNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	case 'b':
+		r = lx.peek()
+		if !isBinary(r) {
+			return lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+		}
+		return lexBinaryInteger
+	case 'o':
+		r = lx.peek()
+		if !isOctal(r) {
+			return lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+		}
+		return lexOctalInteger
+	case 'x':
+		r = lx.peek()
+		if !isHexadecimal(r) {
+			return lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
+		}
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexFloat
+	}
+	switch r {
+	case '_', '.', '-', '+', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemFloat)
+	return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+	var rs []rune
+	for {
+		r := lx.next()
+		if !unicode.IsLetter(r) {
+			lx.backup()
+			break
+		}
+		rs = append(rs, r)
+	}
+	s := string(rs)
+	switch s {
+	case "true", "false":
+		lx.emit(itemBool)
+		return lx.pop()
+	}
+	return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemCommentStart)
+	return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isNL(r) || r == eof:
+		lx.backup()
+		lx.emit(itemText)
+		return lx.pop()
+	default:
+		return lexComment
+	}
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
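+//
+// For example, a state can discard whitespace and stay in the same state:
+//
+//	case isWhitespace(r):
+//		return lexSkip(lx, lexInlineTableValue)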
+func lexSkip(lx *lexer, nextState stateFn) stateFn { + lx.ignore() + return nextState +} + +func (s stateFn) String() string { + name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() + if i := strings.LastIndexByte(name, '.'); i > -1 { + name = name[i+1:] + } + if s == nil { + name = "" + } + return name + "()" +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemKeyEnd: + return "KeyEnd" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + case itemInlineTableStart: + return "InlineTableStart" + case itemInlineTableEnd: + return "InlineTableEnd" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} + +func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } +func isNL(r rune) bool { return r == '\n' || r == '\r' } +func isControl(r rune) bool { // Control characters except \t, \r, \n + switch r { + case '\t', '\r', '\n': + return false + default: + return (r >= 0x00 && r <= 0x1f) || r == 0x7f + } +} +func isDigit(r rune) bool { return r >= '0' && r <= '9' } +func isBinary(r rune) bool { return r == '0' || r == '1' } +func isOctal(r rune) bool { return r >= '0' && r <= '7' } +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') +} +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' +} diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go new file mode 100644 index 000000000..868619fb9 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -0,0 +1,120 @@ +package toml + +import ( + "strings" +) + +// MetaData allows access to meta information about TOML data that's not +// accessible otherwise. +// +// It allows checking if a key is defined in the TOML data, whether any keys +// were undecoded, and the TOML type of a key. +type MetaData struct { + context Key // Used only during decoding. + + mapping map[string]interface{} + types map[string]tomlType + keys []Key + decoded map[string]struct{} +} + +// IsDefined reports if the key exists in the TOML data. +// +// The key should be specified hierarchically, for example to access the TOML +// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive. +// +// Returns false for an empty key. +func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var ( + hash map[string]interface{} + ok bool + hashOrVal interface{} = md.mapping + ) + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. 
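+//
+// For example, with `answer = 42` in the TOML source, Type("answer")
+// returns "Integer".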
+// +// Type will return the empty string if given an empty key or a key that does +// not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + if typ, ok := md.types[Key(key).String()]; ok { + return typ.typeString() + } + return "" +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. The list will have the same +// order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if _, ok := md.decoded[key.String()]; !ok { + undecoded = append(undecoded, key) + } + } + return undecoded +} + +// Key represents any TOML key, including key groups. Use (MetaData).Keys to get +// values of this type. +type Key []string + +func (k Key) String() string { + ss := make([]string, len(k)) + for i := range k { + ss[i] = k.maybeQuoted(i) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + if k[i] == "" { + return `""` + } + for _, c := range k[i] { + if !isBareKeyChar(c) { + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + } + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 000000000..8269cca17 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,763 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/BurntSushi/toml/internal" +) + +type parser struct { + lx *lexer + context Key // Full key for the current hash in scope. + currentKey string // Base key name for everything except hashes. + pos Position // Current position in the TOML file. + + ordered []Key // List of keys in the order that they appear in the TOML data. + mapping map[string]interface{} // Map keyname → key value. + types map[string]tomlType // Map keyname → TOML type. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + if pErr, ok := r.(ParseError); ok { + pErr.input = data + err = pErr + return + } + panic(r) + } + }() + + // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() + // which mangles stuff. + if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { + data = data[2:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). 
Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. + ex := 6 + if len(data) < 6 { + ex = len(data) + } + if i := strings.IndexRune(data[:ex], 0); i > -1 { + return nil, ParseError{ + Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", + Position: Position{Line: 1, Start: i, Len: 1}, + Line: 1, + input: data, + } + } + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]struct{}), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicItemf(it item, format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicf(format string, v ...interface{}) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: p.pos, + Line: p.pos.Line, + LastKey: p.current(), + }) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val) + if it.typ == itemError { + if it.err != nil { + panic(ParseError{ + Position: it.pos, + Line: it.pos.Line, + LastKey: p.current(), + err: it.err, + }) + } + + p.panicItemf(it, "%s", it.val) + } + return it +} + +func (p *parser) nextPos() item { + it := p.next() + p.pos = it.pos + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: // # .. + p.expect(itemText) + case itemTableStart: // [ .. ] + name := p.nextPos() + + var key Key + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemTableEnd, name.typ) + + p.addContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: // [[ .. ]] + name := p.nextPos() + + var key Key + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemArrayTableEnd, name.typ) + + p.addContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Remove the context we added (preserving any context from [tbl] lines). 
+ p.context = outerContext + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it, false) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + } + panic("unreachable") +} + +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. +func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) + case itemMultilineString: + return p.replaceEscapes(it, stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) + } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (interface{}, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (interface{}, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) + } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicItemf(it, "Invalid float %q: '.' 
must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. + val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val) + } else { + p.panicItemf(it, "Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location +}{ + {time.RFC3339Nano, time.Local}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, + {"2006-01-02", internal.LocalDate}, + {"15:04:05.999999999", internal.LocalTime}, +} + +func (p *parser) valueDatetime(it item) (interface{}, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) +} + +func (p *parser) valueArray(it item) (interface{}, tomlType) { + p.setType(p.currentKey, tomlArray) + + // p.setType(p.currentKey, typ) + var ( + types []tomlType + + // Initialize to a non-nil empty slice. This makes it consistent with + // how S = [] decodes into a non-nil slice inside something like struct + // { S []string }. See #338 + array = []interface{}{} + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it, true) + array = append(array, val) + types = append(types, typ) + + // XXX: types isn't used here, we need it to record the accurate type + // information. + // + // Not entirely sure how to best store this; could use "key[0]", + // "key[1]" notation, or maybe store it on the Array type? + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) + + /// Loop over all table key/value pairs. + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + /// Read all key parts. + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set the value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[p.currentKey] = val + + /// Restore context. + p.context = prevContext + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash +} + +// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', +// +/- signs, and base prefixes. 
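+//
+// For example, "0123" and "+0123" have a leading zero, while "0", "0x1f",
+// and "123" do not.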
+func numHasLeadingZero(s string) bool {
+	if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x
+		return true
+	}
+	if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
+		return true
+	}
+	return false
+}
+
+// numUnderscoresOK checks whether each underscore in s is surrounded by
+// characters that are not underscores.
+func numUnderscoresOK(s string) bool {
+	switch s {
+	case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
+		return true
+	}
+	accept := false
+	for _, r := range s {
+		if r == '_' {
+			if !accept {
+				return false
+			}
+		}
+
+		// isHexadecimal is a superset of all the permissible characters
+		// surrounding an underscore.
+		accept = isHexadecimal(r)
+	}
+	return accept
+}
+
+// numPeriodsOK checks whether every period in s is followed by a digit.
+func numPeriodsOK(s string) bool {
+	period := false
+	for _, r := range s {
+		if period && !isDigit(r) {
+			return false
+		}
+		period = r == '.'
+	}
+	return !period
+}
+
+// Set the current context of the parser, where the context is either a hash or
+// an array of hashes, depending on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) addContext(key Key, array bool) {
+	var ok bool
+
+	// Always start at the top level and drill down for our context.
+	hashContext := p.mapping
+	keyContext := make(Key, 0)
+
+	// We only need implicit hashes for key[0:-1]
+	for _, k := range key[0 : len(key)-1] {
+		_, ok = hashContext[k]
+		keyContext = append(keyContext, k)
+
+		// No key? Make an implicit hash and move on.
+		if !ok {
+			p.addImplicit(keyContext)
+			hashContext[k] = make(map[string]interface{})
+		}
+
+		// If the hash context is actually an array of tables, then set
+		// the hash context to the last element in that array.
+		//
+		// Otherwise, it better be a table, since this MUST be a key group (by
+		// virtue of it not being the last element in a key).
+		switch t := hashContext[k].(type) {
+		case []map[string]interface{}:
+			hashContext = t[len(t)-1]
+		case map[string]interface{}:
+			hashContext = t
+		default:
+			p.panicf("Key '%s' was already created as a hash.", keyContext)
+		}
+	}
+
+	p.context = keyContext
+	if array {
+		// If this is the first element for this array, then allocate a new
+		// list of tables for it.
+		k := key[len(key)-1]
+		if _, ok := hashContext[k]; !ok {
+			hashContext[k] = make([]map[string]interface{}, 0, 4)
+		}
+
+		// Add a new table. But make sure the key hasn't already been used
+		// for something else.
+		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+			hashContext[k] = append(hash, make(map[string]interface{}))
+		} else {
+			p.panicf("Key '%s' was already created and cannot be used as an array.", key)
+		}
+	} else {
+		p.setValue(key[len(key)-1], make(map[string]interface{}))
+	}
+	p.context = append(p.context, key[len(key)-1])
+}
+
+// set calls setValue and setType.
+func (p *parser) set(key string, val interface{}, typ tomlType) {
+	p.setValue(key, val)
+	p.setType(key, typ)
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, and will
+// account for implicit key groups.
+func (p *parser) setValue(key string, value interface{}) { + var ( + tmpHash interface{} + ok bool + hash = p.mapping + keyContext Key + ) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.panicf("Key '%s' has already been defined.", keyContext) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Normally redefining keys isn't allowed, but the key could have been + // defined implicitly and it's allowed to be redefined concretely. (See + // the `valid/implicit-and-explicit-after.toml` in toml-test) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isArray(keyContext) { + p.removeImplicit(keyContext) + hash[key] = value + return + } + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + + hash[key] = value +} + +// setType sets the type of a particular value at a given key. It should be +// called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). +func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + keyContext = append(keyContext, p.context...) + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + // Special case to make empty keys ("" = 1) work. + // Without it it will set "" rather than `""`. + // TODO: why is this needed? And why is this only needed here? + if len(keyContext) == 0 { + keyContext = Key{""} + } + p.types[keyContext.String()] = typ +} + +// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and +// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). +func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } +func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } +func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } +func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray } +func (p *parser) addImplicitContext(key Key) { + p.addImplicit(key) + p.addContext(key, false) +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) > 0 && s[0] == '\n' { + return s[1:] + } + if len(s) > 1 && s[0] == '\r' && s[1] == '\n' { + return s[2:] + } + return s +} + +// Remove newlines inside triple-quoted strings if a line ends with "\". 
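+//
+// For example, the multiline string
+//
+//	"""foo \
+//	   bar"""
+//
+// decodes to "foo bar".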
+func stripEscapedNewlines(s string) string {
+	split := strings.Split(s, "\n")
+	if len(split) < 1 {
+		return s
+	}
+
+	escNL := false // Keep track of whether the last non-blank line was escaped.
+	for i, line := range split {
+		line = strings.TrimRight(line, " \t\r")
+
+		if len(line) == 0 || line[len(line)-1] != '\\' {
+			split[i] = strings.TrimRight(split[i], "\r")
+			if !escNL && i != len(split)-1 {
+				split[i] += "\n"
+			}
+			continue
+		}
+
+		escBS := true
+		for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
+			escBS = !escBS
+		}
+		if escNL {
+			line = strings.TrimLeft(line, " \t\r")
+		}
+		escNL = !escBS
+
+		if escBS {
+			split[i] += "\n"
+			continue
+		}
+
+		split[i] = line[:len(line)-1] // Remove \
+		if len(split)-1 > i {
+			split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
+		}
+	}
+	return strings.Join(split, "")
+}
+
+func (p *parser) replaceEscapes(it item, str string) string {
+	replaced := make([]rune, 0, len(str))
+	s := []byte(str)
+	r := 0
+	for r < len(s) {
+		if s[r] != '\\' {
+			c, size := utf8.DecodeRune(s[r:])
+			r += size
+			replaced = append(replaced, c)
+			continue
+		}
+		r += 1
+		if r >= len(s) {
+			p.bug("Escape sequence at end of string.")
+			return ""
+		}
+		switch s[r] {
+		default:
+			p.bug("Expected valid escape code after \\, but got %q.", s[r])
+			return ""
+		case ' ', '\t':
+			p.panicItemf(it, "invalid escape: '\\%c'", s[r])
+			return ""
+		case 'b':
+			replaced = append(replaced, rune(0x0008))
+			r += 1
+		case 't':
+			replaced = append(replaced, rune(0x0009))
+			r += 1
+		case 'n':
+			replaced = append(replaced, rune(0x000A))
+			r += 1
+		case 'f':
+			replaced = append(replaced, rune(0x000C))
+			r += 1
+		case 'r':
+			replaced = append(replaced, rune(0x000D))
+			r += 1
+		case '"':
+			replaced = append(replaced, rune(0x0022))
+			r += 1
+		case '\\':
+			replaced = append(replaced, rune(0x005C))
+			r += 1
+		case 'u':
+			// At this point, we know we have a Unicode escape of the form
+			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
+			replaced = append(replaced, escaped)
+			r += 5
+		case 'U':
+			// At this point, we know we have a Unicode escape of the form
+			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
+			replaced = append(replaced, escaped)
+			r += 9
+		}
+	}
+	return string(replaced)
+}
+
+func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
+	s := string(bs)
+	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+	if err != nil {
+		p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
+	}
+	if !utf8.ValidRune(rune(hex)) {
+		p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s)
+	}
+	return rune(hex)
+}
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
new file mode 100644
index 000000000..254ca82e5
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -0,0 +1,242 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+	"reflect"
+	"sort"
+	"sync"
+)
+
+// A field represents a single field found in a struct.
+type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + var count map[reflect.Type]int + var nextCount map[reflect.Type]int + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. 
Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go new file mode 100644 index 000000000..4e90d7737 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_toml.go @@ -0,0 +1,70 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. 
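+//
+// For example, typeEqual(tomlString, tomlString) is true, while
+// typeEqual(tomlString, tomlInteger) is false, and any comparison
+// against nil is false.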
+func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsTable(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} diff --git a/vendor/github.com/alessio/shellescape/.gitignore b/vendor/github.com/alessio/shellescape/.gitignore new file mode 100644 index 000000000..4ba7c2d13 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +.idea/ + +escargs diff --git a/vendor/github.com/alessio/shellescape/.golangci.yml b/vendor/github.com/alessio/shellescape/.golangci.yml new file mode 100644 index 000000000..cd4a17e44 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/.golangci.yml @@ -0,0 +1,64 @@ +# run: +# # timeout for analysis, e.g. 
30s, 5m, default is 1m +# timeout: 5m + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + - goconst + - gocritic + - gofmt + - goimports + - golint + - gosec + - gosimple + - govet + - ineffassign + - interfacer + - maligned + - misspell + - prealloc + - scopelint + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - misspell + - wsl + +issues: + exclude-rules: + - text: "Use of weak random number generator" + linters: + - gosec + - text: "comment on exported var" + linters: + - golint + - text: "don't use an underscore in package name" + linters: + - golint + - text: "ST1003:" + linters: + - stylecheck + # FIXME: Disabled until golangci-lint updates stylecheck with this fix: + # https://github.com/dominikh/go-tools/issues/389 + - text: "ST1016:" + linters: + - stylecheck + +linters-settings: + dogsled: + max-blank-identifiers: 3 + maligned: + # print struct with more effective memory layout or not, false by default + suggest-new: true + +run: + tests: false diff --git a/vendor/github.com/alessio/shellescape/.goreleaser.yml b/vendor/github.com/alessio/shellescape/.goreleaser.yml new file mode 100644 index 000000000..064c9374d --- /dev/null +++ b/vendor/github.com/alessio/shellescape/.goreleaser.yml @@ -0,0 +1,33 @@ +# This is an example goreleaser.yaml file with some sane defaults. +# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + # You may remove this if you don't use go modules. + - go mod download + # you may remove this if you don't need go generate + - go generate ./... +builds: + - env: + - CGO_ENABLED=0 + main: ./cmd/escargs + goos: + - linux + - windows + - darwin +archives: + - replacements: + darwin: Darwin + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' diff --git a/vendor/github.com/alessio/shellescape/AUTHORS b/vendor/github.com/alessio/shellescape/AUTHORS new file mode 100644 index 000000000..4a647a6f4 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/AUTHORS @@ -0,0 +1 @@ +Alessio Treglia diff --git a/vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md b/vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..e8eda6062 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md @@ -0,0 +1,76 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at alessio@debian.org. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/vendor/github.com/alessio/shellescape/LICENSE b/vendor/github.com/alessio/shellescape/LICENSE new file mode 100644 index 000000000..9f760679f --- /dev/null +++ b/vendor/github.com/alessio/shellescape/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Alessio Treglia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/alessio/shellescape/README.md b/vendor/github.com/alessio/shellescape/README.md new file mode 100644 index 000000000..910bb253b --- /dev/null +++ b/vendor/github.com/alessio/shellescape/README.md @@ -0,0 +1,61 @@ +![Build](https://github.com/alessio/shellescape/workflows/Build/badge.svg) +[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/alessio/shellescape?tab=overview) +[![sourcegraph](https://sourcegraph.com/github.com/alessio/shellescape/-/badge.svg)](https://sourcegraph.com/github.com/alessio/shellescape) +[![codecov](https://codecov.io/gh/alessio/shellescape/branch/master/graph/badge.svg)](https://codecov.io/gh/alessio/shellescape) +[![Coverage](https://gocover.io/_badge/github.com/alessio/shellescape)](https://gocover.io/github.com/alessio/shellescape) +[![Go Report Card](https://goreportcard.com/badge/github.com/alessio/shellescape)](https://goreportcard.com/report/github.com/alessio/shellescape) + +# shellescape +Escape arbitrary strings for safe use as command line arguments. +## Contents of the package + +This package provides the `shellescape.Quote()` function that returns a +shell-escaped copy of a string. This functionality could be helpful +in those cases where it is known that the output of a Go program will +be appended to/used in the context of shell programs' command line arguments. + +This work was inspired by the Python original package +[shellescape](https://pypi.python.org/pypi/shellescape). 
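+
+Where several arguments need to be escaped at once, the package's
+`QuoteCommand()` escapes each element of a slice and joins the results;
+a minimal example:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"gopkg.in/alessio/shellescape.v1"
+)
+
+func main() {
+	args := []string{"rm", "-rf", "some dir; echo pwned"}
+	// Prints: rm -rf 'some dir; echo pwned'
+	fmt.Println(shellescape.QuoteCommand(args))
+}
+```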
+
+## Usage
+
+The following snippet shows a typical unsafe idiom:
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+)
+
+func main() {
+	fmt.Printf("ls -l %s\n", os.Args[1])
+}
+```
+_[See in Go Playground](https://play.golang.org/p/Wj2WoUfH_d)_
+
+Especially when creating pipelines of commands which might end up being
+executed by a shell interpreter, it is particularly unsafe to not escape
+arguments.
+
+`shellescape.Quote()` comes in handy to safely escape strings:
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"gopkg.in/alessio/shellescape.v1"
+)
+
+func main() {
+	fmt.Printf("ls -l %s\n", shellescape.Quote(os.Args[1]))
+}
+```
+_[See in Go Playground](https://play.golang.org/p/HJ_CXgSrmp)_
+
+## The escargs utility
+__escargs__ reads lines from the standard input and prints shell-escaped versions. Unlike __xargs__, blank lines on the standard input are not discarded.
diff --git a/vendor/github.com/alessio/shellescape/shellescape.go b/vendor/github.com/alessio/shellescape/shellescape.go
new file mode 100644
index 000000000..dc34a556a
--- /dev/null
+++ b/vendor/github.com/alessio/shellescape/shellescape.go
@@ -0,0 +1,66 @@
+/*
+Package shellescape provides the shellescape.Quote function to escape
+arbitrary strings for safe use as command line arguments in the most common
+POSIX shells.
+
+The original Python package which this work was inspired by can be found
+at https://pypi.python.org/pypi/shellescape.
+*/
+package shellescape // import "gopkg.in/alessio/shellescape.v1"
+
+/*
+The functionality provided by shellescape.Quote could be helpful
+in those cases where it is known that the output of a Go program will
+be appended to/used in the context of shell programs' command line arguments.
+*/
+
+import (
+	"regexp"
+	"strings"
+	"unicode"
+)
+
+var pattern *regexp.Regexp
+
+func init() {
+	pattern = regexp.MustCompile(`[^\w@%+=:,./-]`)
+}
+
+// Quote returns a shell-escaped version of the string s. The returned value
+// is a string that can safely be used as one token in a shell command line.
+func Quote(s string) string {
+	if len(s) == 0 {
+		return "''"
+	}
+
+	if pattern.MatchString(s) {
+		return "'" + strings.ReplaceAll(s, "'", "'\"'\"'") + "'"
+	}
+
+	return s
+}
+
+// QuoteCommand returns a shell-escaped version of the slice of strings.
+// The returned value is a string that can safely be used as shell command arguments.
+func QuoteCommand(args []string) string {
+	l := make([]string, len(args))
+
+	for i, s := range args {
+		l[i] = Quote(s)
+	}
+
+	return strings.Join(l, " ")
+}
+
+// StripUnsafe removes non-printable runes, e.g. control characters in
+// a string that is meant for consumption by terminals that support
+// control characters.
+func StripUnsafe(s string) string {
+	return strings.Map(func(r rune) rune {
+		if unicode.IsPrint(r) {
+			return r
+		}
+
+		return -1
+	}, s)
+}
diff --git a/vendor/github.com/google/safetext/LICENSE b/vendor/github.com/google/safetext/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/google/safetext/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/google/safetext/common/common.go b/vendor/github.com/google/safetext/common/common.go
new file mode 100644
index 000000000..80a8bbd97
--- /dev/null
+++ b/vendor/github.com/google/safetext/common/common.go
@@ -0,0 +1,260 @@
+/*
+ *
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package common implements common functionality for dealing with text/template.
+package common
+
+import (
+	"bytes"
+	"reflect"
+	"strings"
+	"sync"
+	"text/template"
+	"text/template/parse"
+	"unicode"
+	"unicode/utf8"
+)
+
+// ContainsStringsWithSpecialCharacters determines whether an object contains any strings that contain special characters.
+func ContainsStringsWithSpecialCharacters(data interface{}, special string) bool {
+	if data == nil {
+		return false
+	}
+
+	switch reflect.TypeOf(data).Kind() {
+	case reflect.Ptr:
+		p := reflect.ValueOf(data)
+		return !p.IsNil() && ContainsStringsWithSpecialCharacters(p.Elem().Interface(), special)
+	case reflect.String:
+		return strings.ContainsAny(reflect.ValueOf(data).String(), special)
+	case reflect.Slice, reflect.Array:
+		for i := 0; i < reflect.ValueOf(data).Len(); i++ {
+			if ContainsStringsWithSpecialCharacters(reflect.ValueOf(data).Index(i).Interface(), special) {
+				return true
+			}
+		}
+	case reflect.Map:
+		dataIter := reflect.ValueOf(data).MapRange()
+		for dataIter.Next() {
+			if ContainsStringsWithSpecialCharacters(dataIter.Value().Interface(), special) {
+				return true
+			}
+		}
+	case reflect.Struct:
+		t := reflect.TypeOf(data)
+		v := reflect.ValueOf(data)
+		n := v.NumField()
+		for i := 0; i < n; i++ {
+			r, _ := utf8.DecodeRuneInString(t.Field(i).Name)
+			if unicode.IsUpper(r) && ContainsStringsWithSpecialCharacters(v.Field(i).Interface(), special) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// FuncMap to register new template objects with.
+var FuncMap = map[string]interface{}{
+	"textTemplateRemediationFunc": textTemplateRemediationFunc,
+	"StructuralData":              echo,
+}
+
+func echo(in interface{}) interface{} {
+	return in
+}
+
+// BaselineString is a string callback function that just returns a constant string,
+// used to get a baseline of how the resultant YAML is structured.
+func BaselineString(string) string {
+	return "baseline"
+}
+
+// stringCallback provides the callback for how strings should be manipulated before
+// being pasted into the template execution result.
+var stringCallback func(string) string
+var stringCallbackLock sync.Mutex
+
+func textTemplateRemediationFunc(data interface{}) interface{} {
+	return deepCopyMutateStrings(data, stringCallback)
+}
+
+// ExecuteWithCallback performs an execution on a callback-applied template
+// (WalkApplyFuncToNonDeclaractiveActions) with a specified callback.
+func ExecuteWithCallback(tmpl *template.Template, cb func(string) string, result *bytes.Buffer, data interface{}) error {
+	stringCallbackLock.Lock()
+	defer stringCallbackLock.Unlock()
+	stringCallback = cb
+
+	return tmpl.Execute(result, data)
+}
+
+func makePointer(data interface{}) interface{} {
+	rtype := reflect.New(reflect.TypeOf(data))
+	rtype.Elem().Set(reflect.ValueOf(data))
+	return rtype.Interface()
+}
+
+func dereference(data interface{}) interface{} {
+	return reflect.ValueOf(data).Elem().Interface()
+}
+
+func deepCopyMutateStrings(data interface{}, mutateF func(string) string) interface{} {
+	var r interface{}
+
+	if data == nil {
+		return nil
+	}
+
+	switch reflect.TypeOf(data).Kind() {
+	case reflect.Ptr:
+		p := reflect.ValueOf(data)
+		if p.IsNil() {
+			r = data
+		} else {
+			c := deepCopyMutateStrings(dereference(data), mutateF)
+			r = makePointer(c)
+
+			// Sometimes we accidentally introduce one too many layers of indirection (seems related to protobuf generated fields like ReleaseNamespace *ReleaseNamespace `... reflect:"unexport"`)
+			if reflect.TypeOf(r) != reflect.TypeOf(data) {
+				r = c
+			}
+		}
+	case reflect.String:
+		return mutateF(reflect.ValueOf(data).String())
+	case reflect.Slice, reflect.Array:
+		rc := reflect.MakeSlice(reflect.TypeOf(data), reflect.ValueOf(data).Len(), reflect.ValueOf(data).Len())
+		for i := 0; i < reflect.ValueOf(data).Len(); i++ {
+			rc.Index(i).Set(reflect.ValueOf(deepCopyMutateStrings(reflect.ValueOf(data).Index(i).Interface(), mutateF)))
+		}
+		r = rc.Interface()
+	case reflect.Map:
+		rc := reflect.MakeMap(reflect.TypeOf(data))
+		dataIter := reflect.ValueOf(data).MapRange()
+		for dataIter.Next() {
+			rc.SetMapIndex(dataIter.Key(), reflect.ValueOf(deepCopyMutateStrings(dataIter.Value().Interface(), mutateF)))
+		}
+		r = rc.Interface()
+	case reflect.Struct:
+		s := reflect.New(reflect.TypeOf(data))
+
+		t := reflect.TypeOf(data)
+		v := reflect.ValueOf(data)
+		n := v.NumField()
+		for i := 0; i < n; i++ {
+			r, _ := utf8.DecodeRuneInString(t.Field(i).Name)
+
+			// Don't copy unexported fields
+			if unicode.IsUpper(r) {
+				reflect.Indirect(s).Field(i).Set(
+					reflect.ValueOf(deepCopyMutateStrings(v.Field(i).Interface(), mutateF)),
+				)
+			}
+		}
+
+		r = s.Interface()
+	default:
+		// No other types need special handling (int, bool, etc)
+		r = data
+	}
+
+	return r
+}
+
+func applyPipeCmds(cmds []*parse.CommandNode) {
+	applyFunc := "textTemplateRemediationFunc"
+
+	for _, c := range cmds {
+		newArgs := make([]parse.Node, 0)
+		for i, a := range c.Args {
+			switch a := a.(type) {
+			case *parse.DotNode, *parse.FieldNode, *parse.VariableNode:
+				if i == 0 && len(c.Args) > 1 {
+					// If this is the first "argument" of multiple, then it is really a function
+					newArgs = append(newArgs, a)
+				} else {
+					// If this node is an argument to a call to "StructuralData", then pass it through as-is
+					switch identifier := c.Args[0].(type) {
+					case *parse.IdentifierNode:
+						if identifier.Ident == "StructuralData" {
+							newArgs = append(newArgs, a)
+							continue
+						}
+					}
+
+					newPipe := &parse.PipeNode{NodeType: parse.NodePipe, Decl: nil}
+					newPipe.Cmds = []*parse.CommandNode{
+						&parse.CommandNode{NodeType: parse.NodeCommand, Args: []parse.Node{a}},
+						&parse.CommandNode{NodeType: parse.NodeCommand, Args: []parse.Node{&parse.IdentifierNode{NodeType: parse.NodeIdentifier, Ident: applyFunc}}},
+					}
+					newArgs = append(newArgs, newPipe)
+				}
+			case *parse.PipeNode:
+				applyPipeCmds(a.Cmds)
+				newArgs = append(newArgs, a)
+			default:
+				newArgs = append(newArgs, a)
+			}
+		}
+
+		c.Args = newArgs
+	}
+}
+
+func branchNode(node parse.Node) *parse.BranchNode {
+	switch node := node.(type) {
+	case *parse.IfNode:
+		return &node.BranchNode
+	case *parse.RangeNode:
+		return &node.BranchNode
+	case *parse.WithNode:
+		return &node.BranchNode
+	}
+
+	return nil
+}
+
+// WalkApplyFuncToNonDeclaractiveActions walks the AST, applying a pipeline function to any "paste" nodes (non-declarative action nodes)
+func WalkApplyFuncToNonDeclaractiveActions(template *template.Template, node parse.Node) {
+	switch node := node.(type) {
+	case *parse.ActionNode:
+		// Non-declarative actions are paste actions
+		if len(node.Pipe.Decl) == 0 {
+			applyPipeCmds(node.Pipe.Cmds)
+		}
+
+	case *parse.IfNode, *parse.RangeNode, *parse.WithNode:
+		nodeBranch := branchNode(node)
+		WalkApplyFuncToNonDeclaractiveActions(template, nodeBranch.List)
+		if nodeBranch.ElseList != nil {
+			WalkApplyFuncToNonDeclaractiveActions(template, nodeBranch.ElseList)
+		}
+	case *parse.ListNode:
+		for _, node := range node.Nodes {
+			WalkApplyFuncToNonDeclaractiveActions(template, node)
+		}
+	case *parse.TemplateNode:
+		tmpl := template.Lookup(node.Name)
+		if tmpl != nil {
+			treeCopy := tmpl.Tree.Copy()
+			WalkApplyFuncToNonDeclaractiveActions(tmpl, treeCopy.Root)
+			template.AddParseTree(node.Name, treeCopy)
+		}
+	}
+}
diff --git a/vendor/github.com/google/safetext/yamltemplate/yamltemplate.go b/vendor/github.com/google/safetext/yamltemplate/yamltemplate.go
new file mode 100644
index 000000000..47d8f2838
--- /dev/null
+++ b/vendor/github.com/google/safetext/yamltemplate/yamltemplate.go
@@ -0,0 +1,657 @@
+/*
+ *
+ * Copyright 2022 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package yamltemplate is a drop-in replacement for using text/template to produce YAML that adds automatic detection of YAML injection
+package yamltemplate
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"text/template"
+	"text/template/parse"
+	"unicode"
+	"unicode/utf8"
+
+	"gopkg.in/yaml.v3"
+
+	"github.com/google/safetext/common"
+)
+
+// ErrInvalidYAMLTemplate indicates the requested template is not valid YAML.
+var ErrInvalidYAMLTemplate error = errors.New("Invalid YAML Template")
+
+// ErrYAMLInjection indicates the inputs resulted in YAML injection.
+var ErrYAMLInjection error = errors.New("YAML Injection Detected")
+
+// ExecError is the custom error type returned when Execute has an
+// error evaluating its template. (If a write error occurs, the actual
+// error is returned; it will not be of type ExecError.)
+type ExecError = template.ExecError
+
+// FuncMap is the type of the map defining the mapping from names to functions.
+// Each function must have either a single return value, or two return values of
+// which the second has type error. In that case, if the second (error)
+// return value evaluates to non-nil during execution, execution terminates and
+// Execute returns that error.
+//
+// Errors returned by Execute wrap the underlying error; call errors.As to
+// uncover them.
+//
+// When template execution invokes a function with an argument list, that list
+// must be assignable to the function's parameter types. Functions meant to
+// apply to arguments of arbitrary type can use parameters of type interface{} or
+// of type reflect.Value. Similarly, functions meant to return a result of arbitrary
+// type can return interface{} or reflect.Value.
+type FuncMap = template.FuncMap
+
+// Template is the representation of a parsed template. The *parse.Tree
+// field is exported only for use by html/template and should be treated
+// as unexported by all other clients.
+type Template struct {
+	unsafeTemplate *template.Template
+}
+
+// New allocates a new, undefined template with the given name.
+func New(name string) *Template {
+	return &Template{unsafeTemplate: template.New(name).Funcs(common.FuncMap)}
+}
+
+const yamlSpecialCharacters = "{}[]&*#?|-.<>=!%@:\"'`,\r\n"
+
+func mapOrArray(in interface{}) bool {
+	return in != nil && (reflect.TypeOf(in).Kind() == reflect.Map || reflect.TypeOf(in).Kind() == reflect.Slice || reflect.TypeOf(in).Kind() == reflect.Array)
+}
+
+func allKeysMatch(base interface{}, a interface{}, b interface{}) bool {
+	if base == nil {
+		return a == nil && b == nil
+	}
+
+	switch reflect.TypeOf(base).Kind() {
+	case reflect.Ptr:
+		if reflect.TypeOf(a).Kind() != reflect.Ptr || reflect.TypeOf(b).Kind() != reflect.Ptr {
+			return false
+		}
+
+		if !allKeysMatch(reflect.ValueOf(base).Elem().Interface(), reflect.ValueOf(a).Elem().Interface(), reflect.ValueOf(b).Elem().Interface()) {
+			return false
+		}
+	case reflect.Map:
+		if reflect.TypeOf(a).Kind() != reflect.Map || reflect.TypeOf(b).Kind() != reflect.Map {
+			return false
+		}
+
+		if reflect.ValueOf(a).Len() != reflect.ValueOf(base).Len() || reflect.ValueOf(b).Len() != reflect.ValueOf(base).Len() {
+			return false
+		}
+
+		basei := reflect.ValueOf(base).MapRange()
+		for basei.Next() {
+			av := reflect.ValueOf(a).MapIndex(basei.Key())
+			bv := reflect.ValueOf(b).MapIndex(basei.Key())
+			if !av.IsValid() || !bv.IsValid() ||
+				!allKeysMatch(basei.Value().Interface(), av.Interface(), bv.Interface()) {
+				return false
+			}
+		}
+	case reflect.Slice, reflect.Array:
+		if reflect.TypeOf(a).Kind() != reflect.Slice && reflect.TypeOf(a).Kind() != reflect.Array &&
+			reflect.TypeOf(b).Kind() != reflect.Slice && reflect.TypeOf(b).Kind() != reflect.Array {
+			return false
+		}
+
+		if reflect.ValueOf(a).Len() != reflect.ValueOf(base).Len() || reflect.ValueOf(b).Len() != reflect.ValueOf(base).Len() {
+			return false
+		}
+
+		for i := 0; i < reflect.ValueOf(base).Len(); i++ {
+			if !allKeysMatch(reflect.ValueOf(base).Index(i).Interface(), reflect.ValueOf(a).Index(i).Interface(), reflect.ValueOf(b).Index(i).Interface()) {
+				return false
+			}
+		}
+	case reflect.Struct:
+		n := reflect.ValueOf(base).NumField()
+		for i := 0; i < n; i++ {
+			baseit := reflect.TypeOf(base).Field(i)
+			ait := reflect.TypeOf(a).Field(i)
+			bit := reflect.TypeOf(b).Field(i)
+
+			if baseit.Name != ait.Name || baseit.Name != bit.Name {
+				return false
+			}
+
+			// Only compare public members (private members cannot be overwritten by text/template)
+			decodedName, _ := utf8.DecodeRuneInString(baseit.Name)
+			if unicode.IsUpper(decodedName) {
+				basei := reflect.ValueOf(base).Field(i)
+				ai := reflect.ValueOf(a).Field(i)
+				bi := reflect.ValueOf(b).Field(i)
+
+				if !allKeysMatch(basei.Interface(), ai.Interface(), bi.Interface()) {
+					return false
+				}
+			}
+		}
+	case reflect.String:
+		// Baseline type of string was chosen arbitrarily, so just check that there isn't a new map or slice/array injected, which would change the structure of the YAML
+		if mapOrArray(a) || mapOrArray(b) {
+			return false
+		}
+	default:
+		if reflect.TypeOf(a) != reflect.TypeOf(base) || reflect.TypeOf(b) != reflect.TypeOf(base) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func unmarshalYaml(data []byte) ([]interface{}, error) {
+	r := make([]interface{}, 0)
+
+	decoder := yaml.NewDecoder(bytes.NewReader(data))
+	for {
+		var t interface{}
+		err := decoder.Decode(&t)
+
+		if err == io.EOF {
+			break
+		} else if err == nil {
+			r = append(r, t)
+		} else {
+			return nil, err
+		}
+	}
+
+	return r, nil
+}
+
+// Mutation algorithm
+func mutateString(s string) string {
+	// Longest possible output string is 2x the original
+	out := make([]rune, len(s)*2)
+
+	i := 0
+	for _, r := range s {
+		out[i] = r
+		i++
+
+		// Don't repeat quoting-related characters so as to not allow YAML context change in the mutation result
+		if r != '\\' && r != '\'' && r != '"' {
+			out[i] = r
+			i++
+		}
+	}
+
+	return string(out[:i])
+}
+
+// Execute applies a parsed template to the specified data object,
+// and writes the output to wr.
+// If an error occurs executing the template or writing its output,
+// execution stops, but partial results may already have been written to
+// the output writer.
+// A template may be executed safely in parallel, although if parallel
+// executions share a Writer the output may be interleaved.
+//
+// If data is a reflect.Value, the template applies to the concrete
+// value that the reflect.Value holds, as in fmt.Print.
+func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
+	if data == nil {
+		return t.unsafeTemplate.Execute(wr, data)
+	}
+
+	// An attacker may be able to cause type confusion or nil dereference panic during allKeysMatch
+	defer func() {
+		if r := recover(); r != nil {
+			err = ErrYAMLInjection
+		}
+	}()
+
+	// Calculate requested result first
+	var requestedResult bytes.Buffer
+
+	if err := t.unsafeTemplate.Execute(&requestedResult, data); err != nil {
+		return err
+	}
+
+	// Fast path for if there are no YAML special characters in the input strings
+	if !common.ContainsStringsWithSpecialCharacters(data, yamlSpecialCharacters) {
+		// Note: We assume the result was valid YAML, and don't check for ErrInvalidYAMLTemplate
+		requestedResult.WriteTo(wr)
+		return nil
+	}
+
+	walked, err := t.unsafeTemplate.Clone()
+	if err != nil {
+		return err
+	}
+	walked.Tree = walked.Tree.Copy()
+
+	common.WalkApplyFuncToNonDeclaractiveActions(walked, walked.Tree.Root)
+
+	// Get baseline
+	var baselineResult bytes.Buffer
+	if err = common.ExecuteWithCallback(walked, common.BaselineString, &baselineResult, data); err != nil {
+		return err
+	}
+
+	parsedBaselineResult, err := unmarshalYaml(baselineResult.Bytes())
+	if err != nil {
+		return ErrInvalidYAMLTemplate
+	}
+
+	// If baseline was valid, request must also be valid YAML for no injection to have occurred
+	parsedRequestedResult, err := unmarshalYaml(requestedResult.Bytes())
+	if err != nil {
+		return ErrYAMLInjection
+	}
+
+	// Mutate the input
+	var mutatedResult bytes.Buffer
+	if err = common.ExecuteWithCallback(walked, mutateString, &mutatedResult, data); err != nil {
+		return err
+	}
+
+	parsedMutatedResult, err := unmarshalYaml(mutatedResult.Bytes())
+	if err != nil {
+		return ErrYAMLInjection
+	}
+
+	// Compare results
+	if !allKeysMatch(parsedBaselineResult, parsedRequestedResult, parsedMutatedResult) {
+		return ErrYAMLInjection
+	}
+
+	requestedResult.WriteTo(wr)
+	return nil
+}
+
+// Name returns the name of the template.
+func (t *Template) Name() string {
+	return t.unsafeTemplate.Name()
+}
+
+// New allocates a new, undefined template associated with the given one and with the same
+// delimiters. The association, which is transitive, allows one template to
+// invoke another with a {{template}} action.
+//
+// Because associated templates share underlying data, template construction
+// cannot be done safely in parallel. Once the templates are constructed, they
+// can be executed in parallel.
+func (t *Template) New(name string) *Template {
+	return &Template{unsafeTemplate: t.unsafeTemplate.New(name).Funcs(common.FuncMap)}
+}
+
+// Clone returns a duplicate of the template, including all associated
+// templates. The actual representation is not copied, but the name space of
+// associated templates is, so further calls to Parse in the copy will add
+// templates to the copy but not to the original. Clone can be used to prepare
+// common templates and use them with variant definitions for other templates
+// by adding the variants after the clone is made.
+func (t *Template) Clone() (*Template, error) {
+	nt, err := t.unsafeTemplate.Clone()
+	return &Template{unsafeTemplate: nt}, err
+}
+
+// AddParseTree associates the argument parse tree with the template t, giving
+// it the specified name. If the template has not been defined, this tree becomes
+// its definition. If it has been defined and already has that name, the existing
+// definition is replaced; otherwise a new template is created, defined, and returned.
+func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
+	nt, err := t.unsafeTemplate.AddParseTree(name, tree)
+
+	if nt != t.unsafeTemplate {
+		return &Template{unsafeTemplate: nt}, err
+	}
+	return t, err
+}
+
+// Option sets options for the template. Options are described by
+// strings, either a simple string or "key=value". There can be at
+// most one equals sign in an option string. If the option string
+// is unrecognized or otherwise invalid, Option panics.
+//
+// Known options:
+//
+// missingkey: Control the behavior during execution if a map is
+// indexed with a key that is not present in the map.
+//
+//	"missingkey=default" or "missingkey=invalid"
+//		The default behavior: Do nothing and continue execution.
+//		If printed, the result of the index operation is the string
+//		"<no value>".
+//	"missingkey=zero"
+//		The operation returns the zero value for the map type's element.
+//	"missingkey=error"
+//		Execution stops immediately with an error.
+func (t *Template) Option(opt ...string) *Template {
+	for _, s := range opt {
+		t.unsafeTemplate.Option(s)
+	}
+	return t
+}
+
+// Templates returns a slice of defined templates associated with t.
+func (t *Template) Templates() []*Template {
+	s := t.unsafeTemplate.Templates()
+
+	var ns []*Template
+	for _, nt := range s {
+		ns = append(ns, &Template{unsafeTemplate: nt})
+	}
+
+	return ns
+}
+
+// ExecuteTemplate applies the template associated with t that has the given name
+// to the specified data object and writes the output to wr.
+// If an error occurs executing the template or writing its output,
+// execution stops, but partial results may already have been written to
+// the output writer.
+// A template may be executed safely in parallel, although if parallel
+// executions share a Writer the output may be interleaved.
+func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { + tmpl := t.Lookup(name) + if tmpl == nil { + return fmt.Errorf("template: no template %q associated with template %q", name, t.Name()) + } + return tmpl.Execute(wr, data) +} + +// Delims sets the action delimiters to the specified strings, to be used in +// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template +// definitions will inherit the settings. An empty delimiter stands for the +// corresponding default: {{ or }}. +// The return value is the template, so calls can be chained. +func (t *Template) Delims(left, right string) *Template { + t.unsafeTemplate.Delims(left, right) + return t +} + +// DefinedTemplates returns a string listing the defined templates, +// prefixed by the string "; defined templates are: ". If there are none, +// it returns the empty string. For generating an error message here +// and in html/template. +func (t *Template) DefinedTemplates() string { + return t.unsafeTemplate.DefinedTemplates() +} + +// Funcs adds the elements of the argument map to the template's function map. +// It must be called before the template is parsed. +// It panics if a value in the map is not a function with appropriate return +// type or if the name cannot be used syntactically as a function in a template. +// It is legal to overwrite elements of the map. The return value is the template, +// so calls can be chained. +func (t *Template) Funcs(funcMap FuncMap) *Template { + t.unsafeTemplate.Funcs(funcMap) + return t +} + +// Lookup returns the template with the given name that is associated with t. +// It returns nil if there is no such template or the template has no definition. +func (t *Template) Lookup(name string) *Template { + nt := t.unsafeTemplate.Lookup(name) + + if nt == nil { + return nil + } + + if nt != t.unsafeTemplate { + return &Template{unsafeTemplate: nt} + } + + return t +} + +// Parse parses text as a template body for t. +// Named template definitions ({{define ...}} or {{block ...}} statements) in text +// define additional templates associated with t and are removed from the +// definition of t itself. +// +// Templates can be redefined in successive calls to Parse. +// A template definition with a body containing only white space and comments +// is considered empty and will not replace an existing template's body. +// This allows using Parse to add new named template definitions without +// overwriting the main template body. +func (t *Template) Parse(text string) (*Template, error) { + nt, err := t.unsafeTemplate.Parse(text) + + if nt != t.unsafeTemplate { + return &Template{unsafeTemplate: nt}, err + } + + return t, err +} + +// Must is a helper that wraps a call to a function returning (*Template, error) +// and panics if the error is non-nil. 
It is intended for use in variable +// initializations such as +// +// var t = template.Must(template.New("name").Parse("text")) +func Must(t *Template, err error) *Template { + if err != nil { + panic(err) + } + return t +} + +func readFileOS(file string) (name string, b []byte, err error) { + name = filepath.Base(file) + b, err = os.ReadFile(file) + return +} + +func readFileFS(fsys fs.FS) func(string) (string, []byte, error) { + return func(file string) (name string, b []byte, err error) { + name = path.Base(file) + b, err = fs.ReadFile(fsys, file) + return + } +} + +func parseFiles(t *Template, readFile func(string) (string, []byte, error), filenames ...string) (*Template, error) { + if len(filenames) == 0 { + // Not really a problem, but be consistent. + return nil, fmt.Errorf("template: no files named in call to ParseFiles") + } + for _, filename := range filenames { + name, b, err := readFile(filename) + if err != nil { + return nil, err + } + s := string(b) + // First template becomes return value if not already defined, + // and we use that one for subsequent New calls to associate + // all the templates together. Also, if this file has the same name + // as t, this file becomes the contents of t, so + // t, err := New(name).Funcs(xxx).ParseFiles(name) + // works. Otherwise we create a new template associated with t. + var tmpl *Template + if t == nil { + t = New(name) + } + if name == t.Name() { + tmpl = t + } else { + tmpl = t.New(name) + } + _, err = tmpl.Parse(s) + if err != nil { + return nil, err + } + } + return t, nil +} + +// parseGlob is the implementation of the function and method ParseGlob. +func parseGlob(t *Template, pattern string) (*Template, error) { + filenames, err := filepath.Glob(pattern) + if err != nil { + return nil, err + } + if len(filenames) == 0 { + return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) + } + return parseFiles(t, readFileOS, filenames...) +} + +func parseFS(t *Template, fsys fs.FS, patterns []string) (*Template, error) { + var filenames []string + for _, pattern := range patterns { + list, err := fs.Glob(fsys, pattern) + if err != nil { + return nil, err + } + if len(list) == 0 { + return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) + } + filenames = append(filenames, list...) + } + return parseFiles(t, readFileFS(fsys), filenames...) +} + +// ParseFiles creates a new Template and parses the template definitions from +// the named files. The returned template's name will have the base name and +// parsed contents of the first file. There must be at least one file. +// If an error occurs, parsing stops and the returned *Template is nil. +// +// When parsing multiple files with the same name in different directories, +// the last one mentioned will be the one that results. +// For instance, ParseFiles("a/foo", "b/foo") stores "b/foo" as the template +// named "foo", while "a/foo" is unavailable. +func ParseFiles(filenames ...string) (*Template, error) { + return parseFiles(nil, readFileOS, filenames...) +} + +// ParseFiles parses the named files and associates the resulting templates with +// t. If an error occurs, parsing stops and the returned template is nil; +// otherwise it is t. There must be at least one file. +// Since the templates created by ParseFiles are named by the base +// names of the argument files, t should usually have the name of one +// of the (base) names of the files. If it does not, depending on t's +// contents before calling ParseFiles, t.Execute may fail. 
In that +// case use t.ExecuteTemplate to execute a valid template. +// +// When parsing multiple files with the same name in different directories, +// the last one mentioned will be the one that results. +func (t *Template) ParseFiles(filenames ...string) (*Template, error) { + // Ensure template is inited + t.Option() + + return parseFiles(t, readFileOS, filenames...) +} + +// ParseGlob creates a new Template and parses the template definitions from +// the files identified by the pattern. The files are matched according to the +// semantics of filepath.Match, and the pattern must match at least one file. +// The returned template will have the (base) name and (parsed) contents of the +// first file matched by the pattern. ParseGlob is equivalent to calling +// ParseFiles with the list of files matched by the pattern. +// +// When parsing multiple files with the same name in different directories, +// the last one mentioned will be the one that results. +func ParseGlob(pattern string) (*Template, error) { + return parseGlob(nil, pattern) +} + +// ParseGlob parses the template definitions in the files identified by the +// pattern and associates the resulting templates with t. The files are matched +// according to the semantics of filepath.Match, and the pattern must match at +// least one file. ParseGlob is equivalent to calling t.ParseFiles with the +// list of files matched by the pattern. +// +// When parsing multiple files with the same name in different directories, +// the last one mentioned will be the one that results. +func (t *Template) ParseGlob(pattern string) (*Template, error) { + // Ensure template is inited + t.Option() + + return parseGlob(t, pattern) +} + +// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys +// instead of the host operating system's file system. +// It accepts a list of glob patterns. +// (Note that most file names serve as glob patterns matching only themselves.) +func ParseFS(fsys fs.FS, patterns ...string) (*Template, error) { + return parseFS(nil, fsys, patterns) +} + +// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys +// instead of the host operating system's file system. +// It accepts a list of glob patterns. +// (Note that most file names serve as glob patterns matching only themselves.) +func (t *Template) ParseFS(fsys fs.FS, patterns ...string) (*Template, error) { + // Ensure template is inited + t.Option() + + return parseFS(t, fsys, patterns) +} + +// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b. +func HTMLEscape(w io.Writer, b []byte) { + template.HTMLEscape(w, b) +} + +// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s. +func HTMLEscapeString(s string) string { + return template.HTMLEscapeString(s) +} + +// HTMLEscaper returns the escaped HTML equivalent of the textual +// representation of its arguments. +func HTMLEscaper(args ...interface{}) string { + return template.HTMLEscaper(args) +} + +// IsTrue reports whether the value is 'true', in the sense of not the zero of its type, +// and whether the value has a meaningful truth value. This is the definition of +// truth used by if and other such actions. +func IsTrue(val interface{}) (truth, ok bool) { + return template.IsTrue(val) +} + +// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b. 
+func JSEscape(w io.Writer, b []byte) {
+	template.JSEscape(w, b)
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+	return template.JSEscapeString(s)
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+	return template.JSEscaper(args...)
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+	return template.URLQueryEscaper(args...)
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 000000000..5f920e973
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2022 Alan Shreve (@inconshreveable) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md new file mode 100644 index 000000000..7a950d177 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/README.md @@ -0,0 +1,23 @@ +# mousetrap + +mousetrap is a tiny library that answers a single question. + +On a Windows machine, was the process invoked by someone double clicking on +the executable file while browsing in explorer? + +### Motivation + +Windows developers unfamiliar with command line tools will often "double-click" +the executable for a tool. Because most CLI tools print the help and then exit +when invoked without arguments, this is often very frustrating for those users. + +mousetrap provides a way to detect these invocations so that you can provide +more helpful behavior and instructions on how to run the CLI tool. To see what +this looks like, both from an organizational and a technical perspective, see +https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ + +### The interface + +The library exposes a single interface: + + func StartedByExplorer() (bool) diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go new file mode 100644 index 000000000..06a91f086 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go @@ -0,0 +1,16 @@ +//go:build !windows +// +build !windows + +package mousetrap + +// StartedByExplorer returns true if the program was invoked by the user +// double-clicking on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +// +// On non-Windows platforms, it always returns false. 
+func StartedByExplorer() bool { + return false +} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go new file mode 100644 index 000000000..0c5688021 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go @@ -0,0 +1,42 @@ +package mousetrap + +import ( + "syscall" + "unsafe" +) + +func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { + snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err + } + defer syscall.CloseHandle(snapshot) + var procEntry syscall.ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = syscall.Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = syscall.Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } + } +} + +// StartedByExplorer returns true if the program was invoked by the user double-clicking +// on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +func StartedByExplorer() bool { + pe, err := getProcessEntry(syscall.Getppid()) + if err != nil { + return false + } + return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) +} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 000000000..65dc692b6 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
new file mode 100644
index 000000000..38418353e
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/README.md
@@ -0,0 +1,50 @@
+# go-isatty
+
+[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
+[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty)
+[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/mattn/go-isatty"
+	"os"
+)
+
+func main() {
+	if isatty.IsTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Terminal")
+	} else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Cygwin/MSYS2 Terminal")
+	} else {
+		fmt.Println("Is Not Terminal")
+	}
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+## License
+
+MIT
+
+## Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
+
+## Thanks
+
+* k-takata: base idea for IsCygwinTerminal
+
+  https://github.com/k-takata/go-iscygpty
diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go
new file mode 100644
index 000000000..17d4f90eb
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/doc.go
@@ -0,0 +1,2 @@
+// Package isatty implements interface to isatty
+package isatty
diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh
new file mode 100644
index 000000000..012162b07
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/go.test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+    go test -race -coverprofile=profile.out -covermode=atomic "$d"
+    if [ -f profile.out ]; then
+        cat profile.out >> coverage.txt
+        rm profile.out
+    fi
+done
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
new file mode 100644
index 000000000..d569c0c94
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -0,0 +1,19 @@
+//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine
+// +build darwin freebsd openbsd netbsd dragonfly hurd
+// +build !appengine
+
+package isatty
+
+import "golang.org/x/sys/unix"
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	_, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA)
+	return err == nil
+}
+
+// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2
+// terminal. It is always false in this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go
new file mode 100644
index 000000000..31503226f
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_others.go
@@ -0,0 +1,16 @@
+//go:build appengine || js || nacl || wasm
+// +build appengine js nacl wasm
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is a terminal, which is
+// always false on js and appengine classic, a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 000000000..bae7f9bb3 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,23 @@ +//go:build plan9 +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(int(fd)) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 000000000..0c3acf2dc --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,21 @@ +//go:build solaris && !appengine +// +build solaris,!appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermio(int(fd), unix.TCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 000000000..67787657f --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,19 @@ +//go:build (linux || aix || zos) && !appengine +// +build linux aix zos +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 000000000..8e3c99171 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,125 @@ +//go:build windows && !appengine +// +build windows,!appengine + +package isatty + +import ( + "errors" + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && + token[0] != `\cygwin` && + token[0] != `\Device\NamedPipe\msys` && + token[0] != `\Device\NamedPipe\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler +// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion +// guys are using Windows XP, this is a workaround for those guys, it will also work on system from +// Windows vista to 10 +// see https://stackoverflow.com/a/18792477 for details +func getFileNameByHandle(fd uintptr) (string, error) { + if procNtQueryObject == nil { + return "", errors.New("ntdll.dll: NtQueryObject not supported") + } + + var buf [4 + syscall.MAX_PATH]uint16 + var result int + r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, + fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) + if r != 0 { + return "", e + } + return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + name, err := getFileNameByHandle(fd) + if err != nil { + return false + } + return isCygwinPipeName(name) + } + + // Cygwin/msys's pty is a pipe. 
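+ // For example, an MSYS2 pty shows up as a pipe named like
+ // \msys-1888ae32e00d56aa-pty0-to-master (hypothetical hash), which
+ // isCygwinPipeName above accepts; ordinary named pipes do not match the
+ // five-token dash-separated pattern and are rejected.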
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore new file mode 100644 index 000000000..7b5883475 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/.dockerignore @@ -0,0 +1,2 @@ +cmd/tomll/tomll +cmd/tomljson/tomljson diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore new file mode 100644 index 000000000..e6ba63a5c --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/.gitignore @@ -0,0 +1,5 @@ +test_program/test_program_bin +fuzz/ +cmd/tomll/tomll +cmd/tomljson/tomljson +cmd/tomltestgen/tomltestgen diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md new file mode 100644 index 000000000..98b9893d3 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md @@ -0,0 +1,132 @@ +## Contributing + +Thank you for your interest in go-toml! We appreciate you considering +contributing to go-toml! + +The main goal of the project is to provide an easy-to-use TOML +implementation for Go that gets the job done and gets out of your way – +dealing with TOML is probably not the central piece of your project. + +As the single maintainer of go-toml, time is scarce. All help, big or +small, is more than welcome! + +### Ask questions + +Any question you may have, somebody else might have it too. Always feel +free to ask them on the [issues tracker][issues-tracker]. We will try to +answer them as clearly and quickly as possible, time permitting. + +Asking questions also helps us identify areas where the documentation needs +improvement, or new features that weren't envisioned before. Sometimes, a +seemingly innocent question leads to the fix of a bug. Don't hesitate and +ask away! + +### Improve the documentation + +The best way to share your knowledge and experience with go-toml is to +improve the documentation. Fix a typo, clarify an interface, add an +example, anything goes! + +The documentation is present in the [README][readme] and throughout the +source code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a +change to the documentation, create a pull request with your proposed +changes. For simple changes like that, the easiest way to go is probably +the "Fork this project and edit the file" button on GitHub, displayed at +the top right of the file. Unless it's a trivial change (for example a +typo), provide a little bit of context in your pull request description or +commit message. + +### Report a bug + +Found a bug? Sorry to hear that :(. Help us and others track it down and +fix it by reporting it. [File a new bug report][bug-report] on the [issues +tracker][issues-tracker]. The template should provide enough guidance on +what to include. When in doubt: add more details! By reducing ambiguity and +providing more information, it decreases back and forth and saves everyone +time. + +### Code changes + +Want to contribute a patch? Very happy to hear that!
+ +First, some high-level rules: + +* A short proposal with some POC code is better than a lengthy piece of + text with no code. Code speaks louder than words. +* No backward-incompatible patch will be accepted unless discussed. + Sometimes it's hard, and Go's lack of versioning by default does not + help, but we try not to break people's programs unless we absolutely have + to. +* If you are writing a new feature or extending an existing one, make sure + to write some documentation. +* Bug fixes need to be accompanied by regression tests. +* New code needs to be tested. +* Your commit messages need to explain why the change is needed, even if + already included in the PR description. + +It does sound like a lot, but those best practices are here to save time +overall and continuously improve the quality of the project, which is +something everyone benefits from. + +#### Get started + +The fairly standard code contribution process looks like this: + +1. [Fork the project][fork]. +2. Make your changes, commit on any branch you like. +3. [Open up a pull request][pull-request]. +4. Review; we may ask for changes. +5. Merge. You're in! + +Feel free to ask for help! You can create draft pull requests to gather +some early feedback! + +#### Run the tests + +You can run tests for go-toml using Go's test tool: `go test ./...`. +When you create a pull request, all tests will be run on Linux on a few Go +versions (Travis CI), and on Windows using the latest Go version +(AppVeyor). + +#### Style + +Try to look around and follow the same format and structure as the rest of +the code. We enforce using `go fmt` on the whole code base. + +--- + +### Maintainers-only + +#### Merge pull request + +Checklist: + +* Passing CI. +* Does not introduce backward-incompatible changes (unless discussed). +* Has relevant doc changes. +* Has relevant unit tests. + +1. Merge using "squash and merge". +2. Make sure to edit the commit message to keep all the useful information + nice and clean. +3. Make sure the commit title is clear and contains the PR number (#123). + +#### New release + +1. Go to [releases][releases]. Click on "X commits to master since this + release". +2. Make note of all the changes. Look for backward incompatible changes, + new features, and bug fixes. +3. Pick the new version using the above and semver. +4. Create a [new release][new-release]. +5. Follow the same format as [1.1.0][release-110]. + +[issues-tracker]: https://github.com/pelletier/go-toml/issues +[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md +[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml +[readme]: ./README.md +[fork]: https://help.github.com/articles/fork-a-repo +[pull-request]: https://help.github.com/en/articles/creating-a-pull-request +[releases]: https://github.com/pelletier/go-toml/releases +[new-release]: https://github.com/pelletier/go-toml/releases/new +[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile new file mode 100644 index 000000000..fffdb0166 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/Dockerfile @@ -0,0 +1,11 @@ +FROM golang:1.12-alpine3.9 as builder +WORKDIR /go/src/github.com/pelletier/go-toml +COPY . . +ENV CGO_ENABLED=0 +ENV GOOS=linux +RUN go install ./...
+ +FROM scratch +COPY --from=builder /go/bin/tomll /usr/bin/tomll +COPY --from=builder /go/bin/tomljson /usr/bin/tomljson +COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE new file mode 100644 index 000000000..f414553c2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/LICENSE @@ -0,0 +1,247 @@ +The bulk of github.com/pelletier/go-toml is distributed under the MIT license +(see below), with the exception of localtime.go and localtime.test.go. +Those two files have been copied over from Google's civil library at revision +ed46f5086358513cf8c25f8e3f022cb838a49d66, and are distributed under the Apache +2.0 license (see below). + + +github.com/pelletier/go-toml: + + +The MIT License (MIT) + +Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +localtime.go, localtime_test.go: + +Originals: + https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go + https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil_test.go +Changes: + * Renamed files from civil* to localtime*. + * Package changed from civil to toml. + * 'Local' prefix added to all structs. +License: + https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/LICENSE + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile new file mode 100644 index 000000000..9e4503aea --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/Makefile @@ -0,0 +1,29 @@ +export CGO_ENABLED=0 +go := go +go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) +go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) + +out.tools := tomll tomljson jsontoml +out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) +sources := $(wildcard **/*.go) + + +.PHONY: +tools: $(out.tools) + +$(out.tools): $(sources) + GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ + +.PHONY: +dist: $(out.dist) + +$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % + if [ "$(go.goos)" = "windows" ]; then \ + tar -cJf $@ $^.exe; \ + else \ + tar -cJf $@ $^; \ + fi + +.PHONY: +clean: + rm -rf $(out.tools) $(out.dist) diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..041cdc4a2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,5 @@ +**Issue:** add link to pelletier/go-toml issue here + +Explanation of what this pull request does. + +More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md new file mode 100644 index 000000000..7399e04bf --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/README.md @@ -0,0 +1,176 @@ +# go-toml + +Go library for the [TOML](https://toml.io/) format. + +This library supports TOML version +[v1.0.0-rc.3](https://toml.io/en/v1.0.0-rc.3) + +[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml.svg)](https://pkg.go.dev/github.com/pelletier/go-toml) +[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) +[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) +[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) +[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) + + +## Development status + +**ℹ️ Consider go-toml v2!** + +The next version of go-toml is in [active development][v2-dev], and +[nearing completion][v2-map]. + +Though technically in beta, v2 is already more tested, [fixes bugs][v1-bugs], +and [much faster][v2-bench]. If you only need reading and writing TOML documents +(majority of cases), those features are implemented and the API unlikely to +change. + +The remaining features will be added shortly. While pull-requests are welcome on +v1, no active development is expected on it. When v2.0.0 is released, v1 will be +deprecated. 
+ +👉 [go-toml v2][v2] + +[v2]: https://github.com/pelletier/go-toml/tree/v2 +[v2-map]: https://github.com/pelletier/go-toml/discussions/506 +[v2-dev]: https://github.com/pelletier/go-toml/tree/v2 +[v1-bugs]: https://github.com/pelletier/go-toml/issues?q=is%3Aissue+is%3Aopen+label%3Av2-fixed +[v2-bench]: https://github.com/pelletier/go-toml/tree/v2#benchmarks + +## Features + +Go-toml provides the following features for using data parsed from TOML documents: + +* Load TOML documents from files and string data +* Easily navigate TOML structure using Tree +* Marshaling and unmarshaling to and from data structures +* Line & column position data for all parsed elements +* [Query support similar to JSON-Path](query/) +* Syntax errors contain line and column numbers + +## Import + +```go +import "github.com/pelletier/go-toml" +``` + +## Usage example + +Read a TOML document: + +```go +config, _ := toml.Load(` +[postgres] +user = "pelletier" +password = "mypassword"`) +// retrieve data directly +user := config.Get("postgres.user").(string) + +// or using an intermediate object +postgresConfig := config.Get("postgres").(*toml.Tree) +password := postgresConfig.Get("password").(string) +``` + +Or use Unmarshal: + +```go +type Postgres struct { + User string + Password string +} +type Config struct { + Postgres Postgres +} + +doc := []byte(` +[Postgres] +User = "pelletier" +Password = "mypassword"`) + +config := Config{} +toml.Unmarshal(doc, &config) +fmt.Println("user=", config.Postgres.User) +``` + +Or use a query: + +```go +// use a query to gather elements without walking the tree +q, _ := query.Compile("$..[user,password]") +results := q.Execute(config) +for ii, item := range results.Values() { + fmt.Printf("Query result %d: %v\n", ii, item) +} +``` + +## Documentation + +The documentation and additional examples are available at +[pkg.go.dev](https://pkg.go.dev/github.com/pelletier/go-toml). + +## Tools + +Go-toml provides three handy command line tools: + +* `tomll`: Reads TOML files and lints them. + + ``` + go install github.com/pelletier/go-toml/cmd/tomll + tomll --help + ``` +* `tomljson`: Reads a TOML file and outputs its JSON representation. + + ``` + go install github.com/pelletier/go-toml/cmd/tomljson + tomljson --help + ``` + + * `jsontoml`: Reads a JSON file and outputs a TOML representation. + + ``` + go install github.com/pelletier/go-toml/cmd/jsontoml + jsontoml --help + ``` + +### Docker image + +Those tools are also available as a Docker image from +[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to +use `tomljson`: + +``` +docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml +``` + +Only master (`latest`) and tagged versions are published to dockerhub. You +can build your own image as usual: + +``` +docker build -t go-toml . +``` + +## Contribute + +Feel free to report bugs and patches using GitHub's pull requests system on +[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be +much appreciated! + +### Run tests + +`go test ./...` + +### Fuzzing + +The script `./fuzz.sh` is available to +run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml. + +## Versioning + +Go-toml follows [Semantic Versioning](http://semver.org/). The supported version +of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of +this document. The last two major versions of Go are supported +(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). 
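+Or write TOML back out. For completeness, a minimal sketch using this
+library's `Marshal` (it reuses the `Config` and `Postgres` types from the
+Unmarshal example above):
+
+```go
+out, err := toml.Marshal(Config{
+	Postgres: Postgres{User: "pelletier", Password: "mypassword"},
+})
+if err != nil {
+	panic(err)
+}
+// Prints a [Postgres] table with User and Password keys.
+fmt.Println(string(out))
+```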
+ +## License + +The MIT License (MIT) + Apache 2.0. Read [LICENSE](LICENSE). diff --git a/vendor/github.com/pelletier/go-toml/SECURITY.md b/vendor/github.com/pelletier/go-toml/SECURITY.md new file mode 100644 index 000000000..b2f21cfc9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +Use this section to tell people about which versions of your project are +currently being supported with security updates. + +| Version | Supported | +| ---------- | ------------------ | +| Latest 2.x | :white_check_mark: | +| All 1.x | :x: | +| All 0.x | :x: | + +## Reporting a Vulnerability + +Email a vulnerability report to `security@pelletier.codes`. Make sure to include +as many details as possible to reproduce the vulnerability. This is a +side-project: I will try to get back to you as quickly as possible, time +permitting in my personal life. Providing a working patch helps very much! diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml new file mode 100644 index 000000000..4af198b4d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml @@ -0,0 +1,188 @@ +trigger: +- master + +stages: +- stage: run_checks + displayName: "Check" + dependsOn: [] + jobs: + - job: fmt + displayName: "fmt" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.16" + inputs: + version: "1.16" + - task: Go@0 + displayName: "go fmt ./..." + inputs: + command: 'custom' + customCommand: 'fmt' + arguments: './...' + - job: coverage + displayName: "coverage" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.16" + inputs: + version: "1.16" + - task: Go@0 + displayName: "Generate coverage" + inputs: + command: 'test' + arguments: "-race -coverprofile=coverage.txt -covermode=atomic" + - task: Bash@3 + inputs: + targetType: 'inline' + script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' + env: + CODECOV_TOKEN: $(CODECOV_TOKEN) + - job: benchmark + displayName: "benchmark" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.16" + inputs: + version: "1.16" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - task: Bash@3 + inputs: + filePath: './benchmark.sh' + arguments: "master $(Build.Repository.Uri)" + + - job: go_unit_tests + displayName: "unit tests" + strategy: + matrix: + linux 1.16: + goVersion: '1.16' + imageName: 'ubuntu-latest' + mac 1.16: + goVersion: '1.16' + imageName: 'macOS-latest' + windows 1.16: + goVersion: '1.16' + imageName: 'windows-latest' + linux 1.15: + goVersion: '1.15' + imageName: 'ubuntu-latest' + mac 1.15: + goVersion: '1.15' + imageName: 'macOS-latest' + windows 1.15: + goVersion: '1.15' + imageName: 'windows-latest' + pool: + vmImage: $(imageName) + steps: + - task: GoTool@0 + displayName: "Install Go $(goVersion)" + inputs: + version: $(goVersion) + - task: Go@0 + displayName: "go test ./..." + inputs: + command: 'test' + arguments: './...' 
+- stage: build_binaries + displayName: "Build binaries" + dependsOn: run_checks + jobs: + - job: build_binary + displayName: "Build binary" + strategy: + matrix: + linux_amd64: + GOOS: linux + GOARCH: amd64 + darwin_amd64: + GOOS: darwin + GOARCH: amd64 + windows_amd64: + GOOS: windows + GOARCH: amd64 + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go" + inputs: + version: 1.16 + - task: Bash@3 + inputs: + targetType: inline + script: "make dist" + env: + go.goos: $(GOOS) + go.goarch: $(GOARCH) + - task: CopyFiles@2 + inputs: + sourceFolder: '$(Build.SourcesDirectory)' + contents: '*.tar.xz' + TargetFolder: '$(Build.ArtifactStagingDirectory)' + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: binaries +- stage: build_binaries_manifest + displayName: "Build binaries manifest" + dependsOn: build_binaries + jobs: + - job: build_manifest + displayName: "Build binaries manifest" + steps: + - task: DownloadBuildArtifacts@0 + inputs: + buildType: 'current' + downloadType: 'single' + artifactName: 'binaries' + downloadPath: '$(Build.SourcesDirectory)' + - task: Bash@3 + inputs: + targetType: inline + script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: manifest + +- stage: build_docker_image + displayName: "Build Docker image" + dependsOn: run_checks + jobs: + - job: build + displayName: "Build" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + command: 'build' + Dockerfile: 'Dockerfile' + buildContext: '.' + addPipelineData: false + +- stage: publish_docker_image + displayName: "Publish Docker image" + dependsOn: build_docker_image + condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) + jobs: + - job: publish + displayName: "Publish" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + containerRegistry: 'DockerHub' + repository: 'pelletier/go-toml' + command: 'buildAndPush' + Dockerfile: 'Dockerfile' + buildContext: '.' + tags: 'latest' diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh new file mode 100644 index 000000000..a69d3040f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/benchmark.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +set -ex + +reference_ref=${1:-master} +reference_git=${2:-.} + +if ! `hash benchstat 2>/dev/null`; then + echo "Installing benchstat" + go get golang.org/x/perf/cmd/benchstat +fi + +tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` +ref_tempdir="${tempdir}/ref" +ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt" +local_benchmark="`pwd`/benchmark-local.txt" + +echo "=== ${reference_ref} (${ref_tempdir})" +git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null +pushd ${ref_tempdir} >/dev/null +git checkout ${reference_ref} >/dev/null 2>/dev/null +go test -bench=. -benchmem | tee ${ref_benchmark} +cd benchmark +go test -bench=. -benchmem | tee -a ${ref_benchmark} +popd >/dev/null + +echo "" +echo "=== local" +go test -bench=. -benchmem | tee ${local_benchmark} +cd benchmark +go test -bench=. 
-benchmem | tee -a ${local_benchmark} + +echo "" +echo "=== diff" +benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go new file mode 100644 index 000000000..a1406a32b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/doc.go @@ -0,0 +1,23 @@ +// Package toml is a TOML parser and manipulation library. +// +// This version supports the specification as described in +// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md +// +// Marshaling +// +// Go-toml can marshal and unmarshal TOML documents from and to data +// structures. +// +// TOML document as a tree +// +// Go-toml can operate on a TOML document as a tree. Use one of the Load* +// functions to parse TOML data and obtain a Tree instance, then one of its +// methods to manipulate the tree. +// +// JSONPath-like queries +// +// The package github.com/pelletier/go-toml/query implements a system +// similar to JSONPath to quickly retrieve elements of a TOML document using a +// single expression. See the package documentation for more information. +// +package toml diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml new file mode 100644 index 000000000..780d9c68f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml @@ -0,0 +1,30 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml new file mode 100644 index 000000000..f45bf88b8 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example.toml @@ -0,0 +1,30 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. 
+ [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go new file mode 100644 index 000000000..14570c8d3 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.go @@ -0,0 +1,31 @@ +// +build gofuzz + +package toml + +func Fuzz(data []byte) int { + tree, err := LoadBytes(data) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + str, err := tree.ToTomlString() + if err != nil { + if str != "" { + panic(`str must be "" if there is an error`) + } + panic(err) + } + + tree, err = Load(str) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + return 1 +} diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh new file mode 100644 index 000000000..3204b4c44 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.sh @@ -0,0 +1,15 @@ +#! /bin/sh +set -eu + +go get github.com/dvyukov/go-fuzz/go-fuzz +go get github.com/dvyukov/go-fuzz/go-fuzz-build + +if [ ! -e toml-fuzz.zip ]; then + go-fuzz-build github.com/pelletier/go-toml +fi + +rm -fr fuzz +mkdir -p fuzz/corpus +cp *.toml fuzz/corpus + +go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go new file mode 100644 index 000000000..e091500b2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -0,0 +1,112 @@ +// Parsing keys handling both bare and quoted keys. + +package toml + +import ( + "errors" + "fmt" +) + +// Convert the bare key group string to an array. +// The input supports double quotation and single quotation, +// but escape sequences are not supported. Lexers must unescape them beforehand. +func parseKey(key string) ([]string, error) { + runes := []rune(key) + var groups []string + + if len(key) == 0 { + return nil, errors.New("empty key") + } + + idx := 0 + for idx < len(runes) { + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip leading whitespace + } + if idx >= len(runes) { + break + } + r := runes[idx] + if isValidBareChar(r) { + // parse bare key + startIdx := idx + endIdx := -1 + idx++ + for idx < len(runes) { + r = runes[idx] + if isValidBareChar(r) { + idx++ + } else if r == '.' { + endIdx = idx + break + } else if isSpace(r) { + endIdx = idx + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip trailing whitespace + } + if idx < len(runes) && runes[idx] != '.' 
{ + return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) + } + break + } else { + return nil, fmt.Errorf("invalid bare key character: %c", r) + } + } + if endIdx == -1 { + endIdx = idx + } + groups = append(groups, string(runes[startIdx:endIdx])) + } else if r == '\'' { + // parse single quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed single-quoted key") + } + r = runes[idx] + if r == '\'' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break + } + idx++ + } + } else if r == '"' { + // parse double quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed double-quoted key") + } + r = runes[idx] + if r == '"' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break + } + idx++ + } + } else if r == '.' { + idx++ + if idx >= len(runes) { + return nil, fmt.Errorf("unexpected end of key") + } + r = runes[idx] + if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { + return nil, fmt.Errorf("expecting key part after dot") + } + } else { + return nil, fmt.Errorf("invalid key character: %c", r) + } + } + if len(groups) == 0 { + return nil, fmt.Errorf("empty key") + } + return groups, nil +} + +func isValidBareChar(r rune) bool { + return isAlphanumeric(r) || r == '-' || isDigit(r) +} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go new file mode 100644 index 000000000..313908e3e --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/lexer.go @@ -0,0 +1,1031 @@ +// TOML lexer. +// +// Written using the principles developed by Rob Pike in +// http://www.youtube.com/watch?v=HxaD_trXwRE + +package toml + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +// Define state functions +type tomlLexStateFn func() tomlLexStateFn + +// Define lexer +type tomlLexer struct { + inputIdx int + input []rune // Textual source + currentTokenStart int + currentTokenStop int + tokens []token + brackets []rune + line int + col int + endbufferLine int + endbufferCol int +} + +// Basic read operations on input + +func (l *tomlLexer) read() rune { + r := l.peek() + if r == '\n' { + l.endbufferLine++ + l.endbufferCol = 1 + } else { + l.endbufferCol++ + } + l.inputIdx++ + return r +} + +func (l *tomlLexer) next() rune { + r := l.read() + + if r != eof { + l.currentTokenStop++ + } + return r +} + +func (l *tomlLexer) ignore() { + l.currentTokenStart = l.currentTokenStop + l.line = l.endbufferLine + l.col = l.endbufferCol +} + +func (l *tomlLexer) skip() { + l.next() + l.ignore() +} + +func (l *tomlLexer) fastForward(n int) { + for i := 0; i < n; i++ { + l.next() + } +} + +func (l *tomlLexer) emitWithValue(t tokenType, value string) { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: t, + val: value, + }) + l.ignore() +} + +func (l *tomlLexer) emit(t tokenType) { + l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) +} + +func (l *tomlLexer) peek() rune { + if l.inputIdx >= len(l.input) { + return eof + } + return l.input[l.inputIdx] +} + +func (l *tomlLexer) peekString(size int) string { + maxIdx := len(l.input) + upperIdx := l.inputIdx + size // FIXME: potential overflow + if upperIdx > maxIdx { + upperIdx = maxIdx + } + return string(l.input[l.inputIdx:upperIdx]) +} + +func (l *tomlLexer) follow(next string) bool { + return next == l.peekString(len(next)) +} + +// Error management + +func (l *tomlLexer) errorf(format 
string, args ...interface{}) tomlLexStateFn { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: tokenError, + val: fmt.Sprintf(format, args...), + }) + return nil +} + +// State functions + +func (l *tomlLexer) lexVoid() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '}': // after '{' + return l.lexRightCurlyBrace + case '[': + return l.lexTableKey + case '#': + return l.lexComment(l.lexVoid) + case '=': + return l.lexEqual + case '\r': + fallthrough + case '\n': + l.skip() + continue + } + + if isSpace(next) { + l.skip() + } + + if isKeyStartChar(next) { + return l.lexKey + } + + if next == eof { + l.next() + break + } + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexRvalue() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '.': + return l.errorf("cannot start float with a dot") + case '=': + return l.lexEqual + case '[': + return l.lexLeftBracket + case ']': + return l.lexRightBracket + case '{': + return l.lexLeftCurlyBrace + case '}': + return l.lexRightCurlyBrace + case '#': + return l.lexComment(l.lexRvalue) + case '"': + return l.lexString + case '\'': + return l.lexLiteralString + case ',': + return l.lexComma + case '\r': + fallthrough + case '\n': + l.skip() + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { + return l.lexRvalue + } + return l.lexVoid + } + + if l.follow("true") { + return l.lexTrue + } + + if l.follow("false") { + return l.lexFalse + } + + if l.follow("inf") { + return l.lexInf + } + + if l.follow("nan") { + return l.lexNan + } + + if isSpace(next) { + l.skip() + continue + } + + if next == eof { + l.next() + break + } + + if next == '+' || next == '-' { + return l.lexNumber + } + + if isDigit(next) { + return l.lexDateTimeOrNumber + } + + return l.errorf("no value can start with %c", next) + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexDateTimeOrNumber() tomlLexStateFn { + // Could be either a date/time, or a digit. + // The options for date/times are: + // YYYY-... => date or date-time + // HH:... => time + // Anything else should be a number. + + lookAhead := l.peekString(5) + if len(lookAhead) < 3 { + return l.lexNumber() + } + + for idx, r := range lookAhead { + if !isDigit(r) { + if idx == 2 && r == ':' { + return l.lexDateTimeOrTime() + } + if idx == 4 && r == '-' { + return l.lexDateTimeOrTime() + } + return l.lexNumber() + } + } + return l.lexNumber() +} + +func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenLeftCurlyBrace) + l.brackets = append(l.brackets, '{') + return l.lexVoid +} + +func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenRightCurlyBrace) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { + return l.errorf("cannot have '}' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] + return l.lexRvalue +} + +func (l *tomlLexer) lexDateTimeOrTime() tomlLexStateFn { + // Example matches: + // 1979-05-27T07:32:00Z + // 1979-05-27T00:32:00-07:00 + // 1979-05-27T00:32:00.999999-07:00 + // 1979-05-27 07:32:00Z + // 1979-05-27 00:32:00-07:00 + // 1979-05-27 00:32:00.999999-07:00 + // 1979-05-27T07:32:00 + // 1979-05-27T00:32:00.999999 + // 1979-05-27 07:32:00 + // 1979-05-27 00:32:00.999999 + // 1979-05-27 + // 07:32:00 + // 00:32:00.999999 + + // we already know those two are digits + l.next() + l.next() + + // Got 2 digits. At that point it could be either a time or a date(-time). 
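+ // For example, "07:32:00" sees ':' as its third rune and lexes as a
+ // time, while "1979-05-27" sees the digit '7' and continues as a
+ // date(-time).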
+ + r := l.next() + if r == ':' { + return l.lexTime() + } + + return l.lexDateTime() +} + +func (l *tomlLexer) lexDateTime() tomlLexStateFn { + // This state accepts an offset date-time, a local date-time, or a local date. + // + // v--- cursor + // 1979-05-27T07:32:00Z + // 1979-05-27T00:32:00-07:00 + // 1979-05-27T00:32:00.999999-07:00 + // 1979-05-27 07:32:00Z + // 1979-05-27 00:32:00-07:00 + // 1979-05-27 00:32:00.999999-07:00 + // 1979-05-27T07:32:00 + // 1979-05-27T00:32:00.999999 + // 1979-05-27 07:32:00 + // 1979-05-27 00:32:00.999999 + // 1979-05-27 + + // date + + // already checked by lexRvalue + l.next() // digit + l.next() // - + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid month digit in date: %c", r) + } + } + + r := l.next() + if r != '-' { + return l.errorf("expected - to separate month of a date, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid day digit in date: %c", r) + } + } + + l.emit(tokenLocalDate) + + r = l.peek() + + if r == eof { + + return l.lexRvalue + } + + if r != ' ' && r != 'T' { + return l.errorf("incorrect date/time separation character: %c", r) + } + + if r == ' ' { + lookAhead := l.peekString(3)[1:] + if len(lookAhead) < 2 { + return l.lexRvalue + } + for _, r := range lookAhead { + if !isDigit(r) { + return l.lexRvalue + } + } + } + + l.skip() // skip the T or ' ' + + // time + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid hour digit in time: %c", r) + } + } + + r = l.next() + if r != ':' { + return l.errorf("time hour/minute separator should be :, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid minute digit in time: %c", r) + } + } + + r = l.next() + if r != ':' { + return l.errorf("time minute/second separator should be :, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid second digit in time: %c", r) + } + } + + r = l.peek() + if r == '.' { + l.next() + r := l.next() + if !isDigit(r) { + return l.errorf("expected at least one digit in time's fraction, not %c", r) + } + + for { + r := l.peek() + if !isDigit(r) { + break + } + l.next() + } + } + + l.emit(tokenLocalTime) + + return l.lexTimeOffset + +} + +func (l *tomlLexer) lexTimeOffset() tomlLexStateFn { + // potential offset + + // Z + // -07:00 + // +07:00 + // nothing + + r := l.peek() + + if r == 'Z' { + l.next() + l.emit(tokenTimeOffset) + } else if r == '+' || r == '-' { + l.next() + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid hour digit in time offset: %c", r) + } + } + + r = l.next() + if r != ':' { + return l.errorf("time offset hour/minute separator should be :, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid minute digit in time offset: %c", r) + } + } + + l.emit(tokenTimeOffset) + } + + return l.lexRvalue +} + +func (l *tomlLexer) lexTime() tomlLexStateFn { + // v--- cursor + // 07:32:00 + // 00:32:00.999999 + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid minute digit in time: %c", r) + } + } + + r := l.next() + if r != ':' { + return l.errorf("time minute/second separator should be :, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid second digit in time: %c", r) + } + } + + r = l.peek() + if r == '.' 
{ + l.next() + r := l.next() + if !isDigit(r) { + return l.errorf("expected at least one digit in time's fraction, not %c", r) + } + + for { + r := l.peek() + if !isDigit(r) { + break + } + l.next() + } + } + + l.emit(tokenLocalTime) + return l.lexRvalue + +} + +func (l *tomlLexer) lexTrue() tomlLexStateFn { + l.fastForward(4) + l.emit(tokenTrue) + return l.lexRvalue +} + +func (l *tomlLexer) lexFalse() tomlLexStateFn { + l.fastForward(5) + l.emit(tokenFalse) + return l.lexRvalue +} + +func (l *tomlLexer) lexInf() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenInf) + return l.lexRvalue +} + +func (l *tomlLexer) lexNan() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenNan) + return l.lexRvalue +} + +func (l *tomlLexer) lexEqual() tomlLexStateFn { + l.next() + l.emit(tokenEqual) + return l.lexRvalue +} + +func (l *tomlLexer) lexComma() tomlLexStateFn { + l.next() + l.emit(tokenComma) + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { + return l.lexVoid + } + return l.lexRvalue +} + +// Parse the key and emits its value without escape sequences. +// bare keys, basic string keys and literal string keys are supported. +func (l *tomlLexer) lexKey() tomlLexStateFn { + var sb strings.Builder + + for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { + if r == '"' { + l.next() + str, err := l.lexStringAsString(`"`, false, true) + if err != nil { + return l.errorf(err.Error()) + } + sb.WriteString("\"") + sb.WriteString(str) + sb.WriteString("\"") + l.next() + continue + } else if r == '\'' { + l.next() + str, err := l.lexLiteralStringAsString(`'`, false) + if err != nil { + return l.errorf(err.Error()) + } + sb.WriteString("'") + sb.WriteString(str) + sb.WriteString("'") + l.next() + continue + } else if r == '\n' { + return l.errorf("keys cannot contain new lines") + } else if isSpace(r) { + var str strings.Builder + str.WriteString(" ") + + // skip trailing whitespace + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str.WriteRune(r) + l.next() + } + // break loop if not a dot + if r != '.' { + break + } + str.WriteString(".") + // skip trailing whitespace after dot + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str.WriteRune(r) + l.next() + } + sb.WriteString(str.String()) + continue + } else if r == '.' 
{ + // skip + } else if !isValidBareChar(r) { + return l.errorf("keys cannot contain %c character", r) + } + sb.WriteRune(r) + l.next() + } + l.emitWithValue(tokenKey, sb.String()) + return l.lexVoid +} + +func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { + return func() tomlLexStateFn { + for next := l.peek(); next != '\n' && next != eof; next = l.peek() { + if next == '\r' && l.follow("\r\n") { + break + } + l.next() + } + l.ignore() + return previousState + } +} + +func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { + l.next() + l.emit(tokenLeftBracket) + l.brackets = append(l.brackets, '[') + return l.lexRvalue +} + +func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { + var sb strings.Builder + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + // find end of string + for { + if l.follow(terminator) { + return sb.String(), nil + } + + next := l.peek() + if next == eof { + break + } + sb.WriteRune(l.next()) + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexLiteralString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := "'" + discardLeadingNewLine := false + if l.follow("''") { + l.skip() + l.skip() + terminator = "'''" + discardLeadingNewLine = true + } + + str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +// Lex a string and return the results as a string. +// Terminator is the substring indicating the end of the token. +// The resulting string does not include the terminator. 
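+// For example, with terminator `"""` everything up to, but not including, the
+// closing triple quote is returned; the caller is expected to skip the
+// terminator itself.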
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { + var sb strings.Builder + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + for { + if l.follow(terminator) { + return sb.String(), nil + } + + if l.follow("\\") { + l.next() + switch l.peek() { + case '\r': + fallthrough + case '\n': + fallthrough + case '\t': + fallthrough + case ' ': + // skip all whitespace chars following backslash + for strings.ContainsRune("\r\n\t ", l.peek()) { + l.next() + } + case '"': + sb.WriteString("\"") + l.next() + case 'n': + sb.WriteString("\n") + l.next() + case 'b': + sb.WriteString("\b") + l.next() + case 'f': + sb.WriteString("\f") + l.next() + case '/': + sb.WriteString("/") + l.next() + case 't': + sb.WriteString("\t") + l.next() + case 'r': + sb.WriteString("\r") + l.next() + case '\\': + sb.WriteString("\\") + l.next() + case 'u': + l.next() + var code strings.Builder + for i := 0; i < 4; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code.WriteRune(c) + } + intcode, err := strconv.ParseInt(code.String(), 16, 32) + if err != nil { + return "", errors.New("invalid unicode escape: \\u" + code.String()) + } + sb.WriteRune(rune(intcode)) + case 'U': + l.next() + var code strings.Builder + for i := 0; i < 8; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code.WriteRune(c) + } + intcode, err := strconv.ParseInt(code.String(), 16, 64) + if err != nil { + return "", errors.New("invalid unicode escape: \\U" + code.String()) + } + sb.WriteRune(rune(intcode)) + default: + return "", errors.New("invalid escape sequence: \\" + string(l.peek())) + } + } else { + r := l.peek() + + if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { + return "", fmt.Errorf("unescaped control character %U", r) + } + l.next() + sb.WriteRune(r) + } + + if l.peek() == eof { + break + } + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := `"` + discardLeadingNewLine := false + acceptNewLines := false + if l.follow(`""`) { + l.skip() + l.skip() + terminator = `"""` + discardLeadingNewLine = true + acceptNewLines = true + } + + str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +func (l *tomlLexer) lexTableKey() tomlLexStateFn { + l.next() + + if l.peek() == '[' { + // token '[[' signifies an array of tables + l.next() + l.emit(tokenDoubleLeftBracket) + return l.lexInsideTableArrayKey + } + // vanilla table key + l.emit(tokenLeftBracket) + return l.lexInsideTableKey +} + +// Parse the key till "]]", but only bare keys are supported +func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroupArray) + } + l.next() + if l.peek() != ']' { + break + } + l.next() + l.emit(tokenDoubleRightBracket) + return l.lexVoid + case '[': + return l.errorf("table array key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table array key") +} + +// Parse the 
key till "]" but only bare keys are supported +func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroup) + } + l.next() + l.emit(tokenRightBracket) + return l.lexVoid + case '[': + return l.errorf("table key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table key") +} + +func (l *tomlLexer) lexRightBracket() tomlLexStateFn { + l.next() + l.emit(tokenRightBracket) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { + return l.errorf("cannot have ']' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] + return l.lexRvalue +} + +type validRuneFn func(r rune) bool + +func isValidHexRune(r rune) bool { + return r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' || + r >= '0' && r <= '9' || + r == '_' +} + +func isValidOctalRune(r rune) bool { + return r >= '0' && r <= '7' || r == '_' +} + +func isValidBinaryRune(r rune) bool { + return r == '0' || r == '1' || r == '_' +} + +func (l *tomlLexer) lexNumber() tomlLexStateFn { + r := l.peek() + + if r == '0' { + follow := l.peekString(2) + if len(follow) == 2 { + var isValidRune validRuneFn + switch follow[1] { + case 'x': + isValidRune = isValidHexRune + case 'o': + isValidRune = isValidOctalRune + case 'b': + isValidRune = isValidBinaryRune + default: + if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { + return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) + } + } + + if isValidRune != nil { + l.next() + l.next() + digitSeen := false + for { + next := l.peek() + if !isValidRune(next) { + break + } + digitSeen = true + l.next() + } + + if !digitSeen { + return l.errorf("number needs at least one digit") + } + + l.emit(tokenInteger) + + return l.lexRvalue + } + } + } + + if r == '+' || r == '-' { + l.next() + if l.follow("inf") { + return l.lexInf + } + if l.follow("nan") { + return l.lexNan + } + } + + pointSeen := false + expSeen := false + digitSeen := false + for { + next := l.peek() + if next == '.' { + if pointSeen { + return l.errorf("cannot have two dots in one float") + } + l.next() + if !isDigit(l.peek()) { + return l.errorf("float cannot end with a dot") + } + pointSeen = true + } else if next == 'e' || next == 'E' { + expSeen = true + l.next() + r := l.peek() + if r == '+' || r == '-' { + l.next() + } + } else if isDigit(next) { + digitSeen = true + l.next() + } else if next == '_' { + l.next() + } else { + break + } + if pointSeen && !digitSeen { + return l.errorf("cannot start float with a dot") + } + } + + if !digitSeen { + return l.errorf("no digit in that number") + } + if pointSeen || expSeen { + l.emit(tokenFloat) + } else { + l.emit(tokenInteger) + } + return l.lexRvalue +} + +func (l *tomlLexer) run() { + for state := l.lexVoid; state != nil; { + state = state() + } +} + +// Entry point +func lexToml(inputBytes []byte) []token { + runes := bytes.Runes(inputBytes) + l := &tomlLexer{ + input: runes, + tokens: make([]token, 0, 256), + line: 1, + col: 1, + endbufferLine: 1, + endbufferCol: 1, + } + l.run() + return l.tokens +} diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go new file mode 100644 index 000000000..9dfe4b9e6 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/localtime.go @@ -0,0 +1,287 @@ +// Implementation of TOML's local date/time. 
+// +// Copied over from Google's civil to avoid pulling all the Google dependencies. +// Originals: +// https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go +// Changes: +// * Renamed files from civil* to localtime*. +// * Package changed from civil to toml. +// * 'Local' prefix added to all structs. +// +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package civil implements types for civil time, a time-zone-independent +// representation of time that follows the rules of the proleptic +// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second +// minutes. +// +// Because they lack location information, these types do not represent unique +// moments or intervals of time. Use time.Time for that purpose. +package toml + +import ( + "fmt" + "time" +) + +// A LocalDate represents a date (year, month, day). +// +// This type does not include location information, and therefore does not +// describe a unique 24-hour timespan. +type LocalDate struct { + Year int // Year (e.g., 2014). + Month time.Month // Month of the year (January = 1, ...). + Day int // Day of the month, starting at 1. +} + +// LocalDateOf returns the LocalDate in which a time occurs in that time's location. +func LocalDateOf(t time.Time) LocalDate { + var d LocalDate + d.Year, d.Month, d.Day = t.Date() + return d +} + +// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. +func ParseLocalDate(s string) (LocalDate, error) { + t, err := time.Parse("2006-01-02", s) + if err != nil { + return LocalDate{}, err + } + return LocalDateOf(t), nil +} + +// String returns the date in RFC3339 full-date format. +func (d LocalDate) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// IsValid reports whether the date is valid. +func (d LocalDate) IsValid() bool { + return LocalDateOf(d.In(time.UTC)) == d +} + +// In returns the time corresponding to time 00:00:00 of the date in the location. +// +// In is always consistent with time.LocalDate, even when time.LocalDate returns a time +// on a different day. For example, if loc is America/Indiana/Vincennes, then both +// time.LocalDate(1955, time.May, 1, 0, 0, 0, 0, loc) +// and +// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc) +// return 23:00:00 on April 30, 1955. +// +// In panics if loc is nil. +func (d LocalDate) In(loc *time.Location) time.Time { + return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) +} + +// AddDays returns the date that is n days in the future. +// n can also be negative to go into the past. +func (d LocalDate) AddDays(n int) LocalDate { + return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n)) +} + +// DaysSince returns the signed number of days between the date and s, not including the end day. +// This is the inverse operation to AddDays. 
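+// For example, for any valid LocalDate d, d.AddDays(3).DaysSince(d) == 3.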
+func (d LocalDate) DaysSince(s LocalDate) (days int) { + // We convert to Unix time so we do not have to worry about leap seconds: + // Unix time increases by exactly 86400 seconds per day. + deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() + return int(deltaUnix / 86400) +} + +// Before reports whether d1 occurs before d2. +func (d1 LocalDate) Before(d2 LocalDate) bool { + if d1.Year != d2.Year { + return d1.Year < d2.Year + } + if d1.Month != d2.Month { + return d1.Month < d2.Month + } + return d1.Day < d2.Day +} + +// After reports whether d1 occurs after d2. +func (d1 LocalDate) After(d2 LocalDate) bool { + return d2.Before(d1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of d.String(). +func (d LocalDate) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The date is expected to be a string in a format accepted by ParseLocalDate. +func (d *LocalDate) UnmarshalText(data []byte) error { + var err error + *d, err = ParseLocalDate(string(data)) + return err +} + +// A LocalTime represents a time with nanosecond precision. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +// +// This type exists to represent the TIME type in storage-based APIs like BigQuery. +// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. +type LocalTime struct { + Hour int // The hour of the day in 24-hour format; range [0-23] + Minute int // The minute of the hour; range [0-59] + Second int // The second of the minute; range [0-59] + Nanosecond int // The nanosecond of the second; range [0-999999999] +} + +// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs +// in that time's location. It ignores the date. +func LocalTimeOf(t time.Time) LocalTime { + var tm LocalTime + tm.Hour, tm.Minute, tm.Second = t.Clock() + tm.Nanosecond = t.Nanosecond() + return tm +} + +// ParseLocalTime parses a string and returns the time value it represents. +// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After +// the HH:MM:SS part of the string, an optional fractional part may appear, +// consisting of a decimal point followed by one to nine decimal digits. +// (RFC3339 admits only one digit after the decimal point). +func ParseLocalTime(s string) (LocalTime, error) { + t, err := time.Parse("15:04:05.999999999", s) + if err != nil { + return LocalTime{}, err + } + return LocalTimeOf(t), nil +} + +// String returns the date in the format described in ParseLocalTime. If Nanoseconds +// is zero, no fractional part will be generated. Otherwise, the result will +// end with a fractional part consisting of a decimal point and nine digits. +func (t LocalTime) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) + if t.Nanosecond == 0 { + return s + } + return s + fmt.Sprintf(".%09d", t.Nanosecond) +} + +// IsValid reports whether the time is valid. +func (t LocalTime) IsValid() bool { + // Construct a non-zero time. + tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) + return LocalTimeOf(tm) == t +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of t.String(). 
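+// For example, LocalTime{Hour: 7, Minute: 32} marshals to "07:32:00".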
+func (t LocalTime) MarshalText() ([]byte, error) {
+	return []byte(t.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The time is expected to be a string in a format accepted by ParseLocalTime.
+func (t *LocalTime) UnmarshalText(data []byte) error {
+	var err error
+	*t, err = ParseLocalTime(string(data))
+	return err
+}
+
+// A LocalDateTime represents a date and time.
+//
+// This type does not include location information, and therefore does not
+// describe a unique moment in time.
+type LocalDateTime struct {
+	Date LocalDate
+	Time LocalTime
+}
+
+// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and DaysSince.
+
+// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location.
+func LocalDateTimeOf(t time.Time) LocalDateTime {
+	return LocalDateTime{
+		Date: LocalDateOf(t),
+		Time: LocalTimeOf(t),
+	}
+}
+
+// ParseLocalDateTime parses a string and returns the LocalDateTime it represents.
+// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits
+// the time offset but includes an optional fractional time, as described in
+// ParseLocalTime. Informally, the accepted format is
+//	YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
+// where the 'T' may be a lower-case 't'.
+func ParseLocalDateTime(s string) (LocalDateTime, error) {
+	t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
+	if err != nil {
+		t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
+		if err != nil {
+			return LocalDateTime{}, err
+		}
+	}
+	return LocalDateTimeOf(t), nil
+}
+
+// String returns the date in the format described in ParseLocalDateTime.
+func (dt LocalDateTime) String() string {
+	return dt.Date.String() + "T" + dt.Time.String()
+}
+
+// IsValid reports whether the datetime is valid.
+func (dt LocalDateTime) IsValid() bool {
+	return dt.Date.IsValid() && dt.Time.IsValid()
+}
+
+// In returns the time corresponding to the LocalDateTime in the given location.
+//
+// If the time is missing or ambiguous at the location, In returns the same
+// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
+// both
+//	time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
+// and
+//	LocalDateTime{
+//		Date: LocalDate{Year: 1955, Month: time.May, Day: 1},
+//		Time: LocalTime{Minute: 30}}.In(loc)
+// return 23:30:00 on April 30, 1955.
+//
+// In panics if loc is nil.
+func (dt LocalDateTime) In(loc *time.Location) time.Time {
+	return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
+}
+
+// Before reports whether dt1 occurs before dt2.
+func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool {
+	return dt1.In(time.UTC).Before(dt2.In(time.UTC))
+}
+
+// After reports whether dt1 occurs after dt2.
+func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool {
+	return dt2.Before(dt1)
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of dt.String().
+func (dt LocalDateTime) MarshalText() ([]byte, error) {
+	return []byte(dt.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The datetime is expected to be a string in a format accepted by ParseLocalDateTime +func (dt *LocalDateTime) UnmarshalText(data []byte) error { + var err error + *dt, err = ParseLocalDateTime(string(data)) + return err +} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go new file mode 100644 index 000000000..571273049 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal.go @@ -0,0 +1,1308 @@ +package toml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +const ( + tagFieldName = "toml" + tagFieldComment = "comment" + tagCommented = "commented" + tagMultiline = "multiline" + tagLiteral = "literal" + tagDefault = "default" +) + +type tomlOpts struct { + name string + nameFromTag bool + comment string + commented bool + multiline bool + literal bool + include bool + omitempty bool + defaultValue string +} + +type encOpts struct { + quoteMapKeys bool + arraysOneElementPerLine bool +} + +var encOptsDefaults = encOpts{ + quoteMapKeys: false, +} + +type annotation struct { + tag string + comment string + commented string + multiline string + literal string + defaultValue string +} + +var annotationDefault = annotation{ + tag: tagFieldName, + comment: tagFieldComment, + commented: tagCommented, + multiline: tagMultiline, + literal: tagLiteral, + defaultValue: tagDefault, +} + +type MarshalOrder int + +// Orders the Encoder can write the fields to the output stream. +const ( + // Sort fields alphabetically. + OrderAlphabetical MarshalOrder = iota + 1 + // Preserve the order the fields are encountered. For example, the order of fields in + // a struct. + OrderPreserve +) + +var timeType = reflect.TypeOf(time.Time{}) +var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() +var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() +var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() +var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() +var localDateType = reflect.TypeOf(LocalDate{}) +var localTimeType = reflect.TypeOf(LocalTime{}) +var localDateTimeType = reflect.TypeOf(LocalDateTime{}) +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) + +// Check if the given marshal type maps to a Tree primitive +func isPrimitive(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isPrimitive(mtype.Elem()) + case reflect.Bool: + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Struct: + return isTimeType(mtype) + default: + return false + } +} + +func isTimeType(mtype reflect.Type) bool { + return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType +} + +// Check if the given marshal type maps to a Tree slice or array +func isTreeSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTreeSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTree(mtype.Elem()) + default: + return false + } +} + +// Check if the given marshal type maps to a slice or array of a custom marshaler type +func isCustomMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return 
isCustomMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a slice or array of a text marshaler type +func isTextMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTextMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a non-Tree slice or array +func isOtherSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isOtherSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return !isTreeSequence(mtype) + default: + return false + } +} + +// Check if the given marshal type maps to a Tree +func isTree(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTree(mtype.Elem()) + case reflect.Map: + return true + case reflect.Struct: + return !isPrimitive(mtype) + default: + return false + } +} + +func isCustomMarshaler(mtype reflect.Type) bool { + return mtype.Implements(marshalerType) +} + +func callCustomMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(Marshaler).MarshalTOML() +} + +func isTextMarshaler(mtype reflect.Type) bool { + return mtype.Implements(textMarshalerType) && !isTimeType(mtype) +} + +func callTextMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(encoding.TextMarshaler).MarshalText() +} + +func isCustomUnmarshaler(mtype reflect.Type) bool { + return mtype.Implements(unmarshalerType) +} + +func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error { + return mval.Interface().(Unmarshaler).UnmarshalTOML(tval) +} + +func isTextUnmarshaler(mtype reflect.Type) bool { + return mtype.Implements(textUnmarshalerType) +} + +func callTextUnmarshaler(mval reflect.Value, text []byte) error { + return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text) +} + +// Marshaler is the interface implemented by types that +// can marshal themselves into valid TOML. +type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +// Unmarshaler is the interface implemented by types that +// can unmarshal a TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +/* +Marshal returns the TOML encoding of v. Behavior is similar to the Go json +encoder, except that there is no concept of a Marshaler interface or MarshalTOML +function for sub-structs, and currently only definite types can be marshaled +(i.e. no `interface{}`). + +The following struct annotations are supported: + + toml:"Field" Overrides the field's name to output. + omitempty When set, empty values and groups are not emitted. + comment:"comment" Emits a # comment on the same line. This supports new lines. + commented:"true" Emits the value as commented. + +Note that pointers are automatically assigned the "omitempty" option, as TOML +explicitly does not handle null values (saying instead the label should be +dropped). 
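+
+For example (field names and tag values here are illustrative):
+
+  type Service struct {
+      Name string   `toml:"name" comment:"service name"`
+      Tags []string `toml:"tags,omitempty"`
+  }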
+
+Tree structural types and corresponding marshal types:
+
+  *Tree                            (*)struct, (*)map[string]interface{}
+  []*Tree                          (*)[](*)struct, (*)[](*)map[string]interface{}
+  []interface{} (as interface{})   (*)[]primitive, (*)[]([]interface{})
+  interface{}                      (*)primitive
+
+Tree primitive types and corresponding marshal types:
+
+  uint64     uint, uint8-uint64, pointers to same
+  int64      int, int8-int64, pointers to same
+  float64    float32, float64, pointers to same
+  string     string, pointers to same
+  bool       bool, pointers to same
+  time.Time  time.Time{}, pointers to same
+
+For additional flexibility, use the Encoder API.
+*/
+func Marshal(v interface{}) ([]byte, error) {
+	return NewEncoder(nil).marshal(v)
+}
+
+// Encoder writes TOML values to an output stream.
+type Encoder struct {
+	w io.Writer
+	encOpts
+	annotation
+	line            int
+	col             int
+	order           MarshalOrder
+	promoteAnon     bool
+	compactComments bool
+	indentation     string
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:           w,
+		encOpts:     encOptsDefaults,
+		annotation:  annotationDefault,
+		line:        0,
+		col:         1,
+		order:       OrderAlphabetical,
+		indentation: "  ",
+	}
+}
+
+// Encode writes the TOML encoding of v to the stream.
+//
+// See the documentation for Marshal for details.
+func (e *Encoder) Encode(v interface{}) error {
+	b, err := e.marshal(v)
+	if err != nil {
+		return err
+	}
+	if _, err := e.w.Write(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+// QuoteMapKeys sets up the encoder to emit quoted TOML keys
+// for maps with string-typed keys.
+//
+// This lifts the character restrictions on map keys.
+func (e *Encoder) QuoteMapKeys(v bool) *Encoder {
+	e.quoteMapKeys = v
+	return e
+}
+
+// ArraysWithOneElementPerLine sets up the encoder to encode arrays
+// with more than one element on multiple lines instead of one.
+//
+// For example:
+//
+//   A = [1,2,3]
+//
+// Becomes
+//
+//   A = [
+//     1,
+//     2,
+//     3,
+//   ]
+func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder {
+	e.arraysOneElementPerLine = v
+	return e
+}
+
+// Order sets the order in which fields are written to the output stream.
+func (e *Encoder) Order(ord MarshalOrder) *Encoder {
+	e.order = ord
+	return e
+}
+
+// Indentation sets the indentation string used when marshaling.
+func (e *Encoder) Indentation(indent string) *Encoder {
+	e.indentation = indent
+	return e
+}
+
+// SetTagName allows changing the default tag "toml".
+func (e *Encoder) SetTagName(v string) *Encoder {
+	e.tag = v
+	return e
+}
+
+// SetTagComment allows changing the default tag "comment".
+func (e *Encoder) SetTagComment(v string) *Encoder {
+	e.comment = v
+	return e
+}
+
+// SetTagCommented allows changing the default tag "commented".
+func (e *Encoder) SetTagCommented(v string) *Encoder {
+	e.commented = v
+	return e
+}
+
+// SetTagMultiline allows changing the default tag "multiline".
+func (e *Encoder) SetTagMultiline(v string) *Encoder {
+	e.multiline = v
+	return e
+}
+
+// PromoteAnonymous controls how anonymous struct fields are marshaled.
+// Usually, they are marshaled as if the inner exported fields were fields in
+// the outer struct. However, if an anonymous struct field is given a name in
+// its TOML tag, it is treated like a regular struct field with that name
+// rather than being anonymous.
+//
+// When anonymous promotion is enabled, all anonymous structs are promoted
+// and treated like regular struct fields.
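+//
+// For example (types are illustrative): given
+//
+//	type Base struct{ ID string }
+//	type Wrapper struct{ Base }
+//
+// the default encoding writes ID at the top level of the output, while with
+// PromoteAnonymous(true) it is written under a [Base] table.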
+func (e *Encoder) PromoteAnonymous(promote bool) *Encoder {
+	e.promoteAnon = promote
+	return e
+}
+
+// CompactComments removes the new line before each comment in the tree.
+func (e *Encoder) CompactComments(cc bool) *Encoder {
+	e.compactComments = cc
+	return e
+}
+
+func (e *Encoder) marshal(v interface{}) ([]byte, error) {
+	// Check if indentation is valid
+	for _, char := range e.indentation {
+		if !isSpace(char) {
+			return []byte{}, fmt.Errorf("invalid indentation: must only contain space or tab characters")
+		}
+	}
+
+	mtype := reflect.TypeOf(v)
+	if mtype == nil {
+		return []byte{}, errors.New("nil cannot be marshaled to TOML")
+	}
+
+	switch mtype.Kind() {
+	case reflect.Struct, reflect.Map:
+	case reflect.Ptr:
+		if mtype.Elem().Kind() != reflect.Struct {
+			return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML")
+		}
+		if reflect.ValueOf(v).IsNil() {
+			return []byte{}, errors.New("nil pointer cannot be marshaled to TOML")
+		}
+	default:
+		return []byte{}, errors.New("Only a struct or map can be marshaled to TOML")
+	}
+
+	sval := reflect.ValueOf(v)
+	if isCustomMarshaler(mtype) {
+		return callCustomMarshaler(sval)
+	}
+	if isTextMarshaler(mtype) {
+		return callTextMarshaler(sval)
+	}
+	t, err := e.valueToTree(mtype, sval)
+	if err != nil {
+		return []byte{}, err
+	}
+
+	var buf bytes.Buffer
+	_, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, e.compactComments, false)
+
+	return buf.Bytes(), err
+}
+
+// Create next tree with a position based on Encoder.line
+func (e *Encoder) nextTree() *Tree {
+	return newTreeWithPosition(Position{Line: e.line, Col: 1})
+}
+
+// Convert given marshal struct or map value to toml tree
+func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return e.valueToTree(mtype.Elem(), mval.Elem())
+	}
+	tval := e.nextTree()
+	switch mtype.Kind() {
+	case reflect.Struct:
+		switch mval.Interface().(type) {
+		case Tree:
+			reflect.ValueOf(tval).Elem().Set(mval)
+		default:
+			for i := 0; i < mtype.NumField(); i++ {
+				mtypef, mvalf := mtype.Field(i), mval.Field(i)
+				opts := tomlOptions(mtypef, e.annotation)
+				if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) {
+					val, err := e.valueToToml(mtypef.Type, mvalf)
+					if err != nil {
+						return nil, err
+					}
+					if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon {
+						e.appendTree(tval, tree)
+					} else {
+						val = e.wrapTomlValue(val, tval)
+						tval.SetPathWithOptions([]string{opts.name}, SetOptions{
+							Comment:   opts.comment,
+							Commented: opts.commented,
+							Multiline: opts.multiline,
+							Literal:   opts.literal,
+						}, val)
+					}
+				}
+			}
+		}
+	case reflect.Map:
+		keys := mval.MapKeys()
+		if e.order == OrderPreserve && len(keys) > 0 {
+			// Sorting []reflect.Value is not straightforward.
+			//
+			// OrderPreserve will support deterministic results when string is used
+			// as the key to maps.
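+			//
+			// The code below therefore special-cases string keys: it copies
+			// them out, sorts them, and writes them back as reflect.Values so
+			// that the map output is deterministic.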
+ typ := keys[0].Type() + kind := keys[0].Kind() + if kind == reflect.String { + ikeys := make([]string, len(keys)) + for i := range keys { + ikeys[i] = keys[i].Interface().(string) + } + sort.Strings(ikeys) + for i := range ikeys { + keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) + } + } + } + for _, key := range keys { + mvalf := mval.MapIndex(key) + if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() { + continue + } + val, err := e.valueToToml(mtype.Elem(), mvalf) + if err != nil { + return nil, err + } + val = e.wrapTomlValue(val, tval) + if e.quoteMapKeys { + keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) + if err != nil { + return nil, err + } + tval.SetPath([]string{keyStr}, val) + } else { + tval.SetPath([]string{key.String()}, val) + } + } + } + return tval, nil +} + +// Convert given marshal slice to slice of Toml trees +func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { + tval := make([]*Tree, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal slice to slice of toml values +func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + tval := make([]interface{}, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal value to toml value +func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + if mtype.Kind() == reflect.Ptr { + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + b, err := callTextMarshaler(mval) + return string(b), err + default: + return e.valueToToml(mtype.Elem(), mval.Elem()) + } + } + if mtype.Kind() == reflect.Interface { + return e.valueToToml(mval.Elem().Type(), mval.Elem()) + } + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + b, err := callTextMarshaler(mval) + return string(b), err + case isTree(mtype): + return e.valueToTree(mtype, mval) + case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): + return e.valueToOtherSlice(mtype, mval) + case isTreeSequence(mtype): + return e.valueToTreeSlice(mtype, mval) + default: + switch mtype.Kind() { + case reflect.Bool: + return mval.Bool(), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) { + return fmt.Sprint(mval), nil + } + return mval.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return mval.Uint(), nil + case reflect.Float32, reflect.Float64: + return mval.Float(), nil + case reflect.String: + return mval.String(), nil + case reflect.Struct: + return mval.Interface(), nil + default: + return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) + } + } +} + +func (e *Encoder) appendTree(t, o *Tree) error { + for key, value := range o.values { + if _, ok := t.values[key]; ok { + continue + } + if tomlValue, ok := value.(*tomlValue); ok { + tomlValue.position.Col = t.position.Col + } + t.values[key] = value + } 
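+	// No error paths above, so appendTree always returns nil.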
+	return nil
+}
+
+// Create a toml value with the current line number as the position line
+func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} {
+	_, isTree := val.(*Tree)
+	_, isTreeS := val.([]*Tree)
+	if isTree || isTreeS {
+		e.line++
+		return val
+	}
+
+	ret := &tomlValue{
+		value: val,
+		position: Position{
+			e.line,
+			parent.position.Col,
+		},
+	}
+	e.line++
+	return ret
+}
+
+// Unmarshal attempts to unmarshal the Tree into a Go struct pointed to by v.
+// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
+// sub-structs, and only definite types can be unmarshaled.
+func (t *Tree) Unmarshal(v interface{}) error {
+	d := Decoder{tval: t, tagName: tagFieldName}
+	return d.unmarshal(v)
+}
+
+// Marshal returns the TOML encoding of the Tree.
+// See Marshal() documentation for the types mapping table.
+func (t *Tree) Marshal() ([]byte, error) {
+	var buf bytes.Buffer
+	_, err := t.WriteTo(&buf)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// Unmarshal parses the TOML-encoded data and stores the result in the value
+// pointed to by v. Behavior is similar to the Go json encoder, except that there
+// is no concept of an Unmarshaler interface or UnmarshalTOML function for
+// sub-structs, and currently only definite types can be unmarshaled to (i.e. no
+// `interface{}`).
+//
+// The following struct annotations are supported:
+//
+//   toml:"Field"   Overrides the field's name to map to.
+//   default:"foo"  Provides a default value.
+//
+// For default values, only fields of the following types are supported:
+//   * string
+//   * bool
+//   * int
+//   * int64
+//   * float64
+//
+// See Marshal() documentation for the types mapping table.
+func Unmarshal(data []byte, v interface{}) error {
+	t, err := LoadReader(bytes.NewReader(data))
+	if err != nil {
+		return err
+	}
+	return t.Unmarshal(v)
+}
+
+// Decoder reads and decodes TOML values from an input stream.
+type Decoder struct {
+	r    io.Reader
+	tval *Tree
+	encOpts
+	tagName string
+	strict  bool
+	visitor visitorState
+}
+
+// NewDecoder returns a new decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		r:       r,
+		encOpts: encOptsDefaults,
+		tagName: tagFieldName,
+	}
+}
+
+// Decode reads a TOML-encoded value from its input
+// and unmarshals it into the value pointed to by v.
+//
+// See the documentation for Marshal for details.
+func (d *Decoder) Decode(v interface{}) error {
+	var err error
+	d.tval, err = LoadReader(d.r)
+	if err != nil {
+		return err
+	}
+	return d.unmarshal(v)
+}
+
+// SetTagName allows changing the default tag "toml".
+func (d *Decoder) SetTagName(v string) *Decoder {
+	d.tagName = v
+	return d
+}
+
+// Strict toggles strict decoding. Any field that is found in the
+// input data and does not have a corresponding struct member causes an error.
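+//
+// For example (illustrative): with Strict(true), decoding "a = 1\nb = 2" into
+// struct{ A int } fails with an "undecoded keys" error naming "b".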
+func (d *Decoder) Strict(strict bool) *Decoder { + d.strict = strict + return d +} + +func (d *Decoder) unmarshal(v interface{}) error { + mtype := reflect.TypeOf(v) + if mtype == nil { + return errors.New("nil cannot be unmarshaled from TOML") + } + if mtype.Kind() != reflect.Ptr { + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + } + + elem := mtype.Elem() + + switch elem.Kind() { + case reflect.Struct, reflect.Map: + case reflect.Interface: + elem = mapStringInterfaceType + default: + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + } + + if reflect.ValueOf(v).IsNil() { + return errors.New("nil pointer cannot be unmarshaled from TOML") + } + + vv := reflect.ValueOf(v).Elem() + + if d.strict { + d.visitor = newVisitorState(d.tval) + } + + sval, err := d.valueFromTree(elem, d.tval, &vv) + if err != nil { + return err + } + if err := d.visitor.validate(); err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(sval) + return nil +} + +// Convert toml tree to marshal struct or map, using marshal type. When mval1 +// is non-nil, merge fields into the given value instead of allocating a new one. +func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval, mval1) + } + + // Check if pointer to value implements the Unmarshaler interface. + if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { + d.visitor.visitAll() + + if tval == nil { + return mvalPtr.Elem(), nil + } + + if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) + } + return mvalPtr.Elem(), nil + } + + var mval reflect.Value + switch mtype.Kind() { + case reflect.Struct: + if mval1 != nil { + mval = *mval1 + } else { + mval = reflect.New(mtype).Elem() + } + + switch mval.Interface().(type) { + case Tree: + mval.Set(reflect.ValueOf(tval).Elem()) + default: + for i := 0; i < mtype.NumField(); i++ { + mtypef := mtype.Field(i) + an := annotation{tag: d.tagName} + opts := tomlOptions(mtypef, an) + if !opts.include { + continue + } + baseKey := opts.name + keysToTry := []string{ + baseKey, + strings.ToLower(baseKey), + strings.ToTitle(baseKey), + strings.ToLower(string(baseKey[0])) + baseKey[1:], + } + + found := false + if tval != nil { + for _, key := range keysToTry { + exists := tval.HasPath([]string{key}) + if !exists { + continue + } + + d.visitor.push(key) + val := tval.GetPath([]string{key}) + fval := mval.Field(i) + mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) + if err != nil { + return mval, formatError(err, tval.GetPositionPath([]string{key})) + } + mval.Field(i).Set(mvalf) + found = true + d.visitor.pop() + break + } + } + + if !found && opts.defaultValue != "" { + mvalf := mval.Field(i) + var val interface{} + var err error + switch mvalf.Kind() { + case reflect.String: + val = opts.defaultValue + case reflect.Bool: + val, err = strconv.ParseBool(opts.defaultValue) + case reflect.Uint: + val, err = strconv.ParseUint(opts.defaultValue, 10, 0) + case reflect.Uint8: + val, err = strconv.ParseUint(opts.defaultValue, 10, 8) + case reflect.Uint16: + val, err = strconv.ParseUint(opts.defaultValue, 10, 16) + case reflect.Uint32: + val, err = strconv.ParseUint(opts.defaultValue, 10, 32) + case reflect.Uint64: + val, err = strconv.ParseUint(opts.defaultValue, 10, 64) + case reflect.Int: + val, err = 
strconv.ParseInt(opts.defaultValue, 10, 0) + case reflect.Int8: + val, err = strconv.ParseInt(opts.defaultValue, 10, 8) + case reflect.Int16: + val, err = strconv.ParseInt(opts.defaultValue, 10, 16) + case reflect.Int32: + val, err = strconv.ParseInt(opts.defaultValue, 10, 32) + case reflect.Int64: + // Check if the provided number has a non-numeric extension. + var hasExtension bool + if len(opts.defaultValue) > 0 { + lastChar := opts.defaultValue[len(opts.defaultValue)-1] + if lastChar < '0' || lastChar > '9' { + hasExtension = true + } + } + // If the value is a time.Duration with extension, parse as duration. + // If the value is an int64 or a time.Duration without extension, parse as number. + if hasExtension && mvalf.Type().String() == "time.Duration" { + val, err = time.ParseDuration(opts.defaultValue) + } else { + val, err = strconv.ParseInt(opts.defaultValue, 10, 64) + } + case reflect.Float32: + val, err = strconv.ParseFloat(opts.defaultValue, 32) + case reflect.Float64: + val, err = strconv.ParseFloat(opts.defaultValue, 64) + default: + return mvalf, fmt.Errorf("unsupported field type for default option") + } + + if err != nil { + return mvalf, err + } + mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) + } + + // save the old behavior above and try to check structs + if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { + tmpTval := tval + if !mtypef.Anonymous { + tmpTval = nil + } + fval := mval.Field(i) + v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) + if err != nil { + return v, err + } + mval.Field(i).Set(v) + } + } + } + case reflect.Map: + mval = reflect.MakeMap(mtype) + for _, key := range tval.Keys() { + d.visitor.push(key) + // TODO: path splits key + val := tval.GetPath([]string{key}) + mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) + if err != nil { + return mval, formatError(err, tval.GetPositionPath([]string{key})) + } + mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) + d.visitor.pop() + } + } + return mval, nil +} + +// Convert toml value to marshal struct/map slice, using marshal type +func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + + for i := 0; i < len(tval); i++ { + d.visitor.push(strconv.Itoa(i)) + val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + d.visitor.pop() + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + + for i := 0; i < len(tval); i++ { + val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val := reflect.ValueOf(tval) + length := val.Len() + + mval, err := makeSliceOrArray(mtype, length) + if err != nil { + return mval, err + } + + for i := 0; i < length; i++ { + val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Create a new slice or a new array 
with specified length +func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { + var mval reflect.Value + switch mtype.Kind() { + case reflect.Slice: + mval = reflect.MakeSlice(mtype, tLength, tLength) + case reflect.Array: + mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() + if tLength > mtype.Len() { + return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) + } + } + return mval, nil +} + +// Convert toml value to marshal value, using marshal type. When mval1 is non-nil +// and the given type is a struct value, merge fields into it. +func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval, mval1) + } + + switch t := tval.(type) { + case *Tree: + var mval11 *reflect.Value + if mtype.Kind() == reflect.Struct { + mval11 = mval1 + } + + if isTree(mtype) { + return d.valueFromTree(mtype, t, mval11) + } + + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) + } else { + return d.valueFromToml(mval1.Elem().Type(), t, nil) + } + } + + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) + case []*Tree: + if isTreeSequence(mtype) { + return d.valueFromTreeSlice(mtype, t) + } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) + case []interface{}: + d.visitor.visit() + if isOtherSequence(mtype) { + return d.valueFromOtherSlice(mtype, t) + } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) + default: + d.visitor.visit() + mvalPtr := reflect.New(mtype) + + // Check if pointer to value implements the Unmarshaler interface. + if isCustomUnmarshaler(mvalPtr.Type()) { + if err := callCustomUnmarshaler(mvalPtr, tval); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) + } + return mvalPtr.Elem(), nil + } + + // Check if pointer to value implements the encoding.TextUnmarshaler. 
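+		// Time-like types are skipped here so that the dedicated date/time
+		// conversions below take precedence over their TextUnmarshaler
+		// implementations.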
+ if isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { + if err := d.unmarshalText(tval, mvalPtr); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) + } + return mvalPtr.Elem(), nil + } + + switch mtype.Kind() { + case reflect.Bool, reflect.Struct: + val := reflect.ValueOf(tval) + + switch val.Type() { + case localDateType: + localDate := val.Interface().(LocalDate) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil + } + case localDateTimeType: + localDateTime := val.Interface().(LocalDateTime) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date( + localDateTime.Date.Year, + localDateTime.Date.Month, + localDateTime.Date.Day, + localDateTime.Time.Hour, + localDateTime.Time.Minute, + localDateTime.Time.Second, + localDateTime.Time.Nanosecond, + time.Local)), nil + } + } + + // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.String: + val := reflect.ValueOf(tval) + // stupidly, int64 is convertible to string. So special case this. + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + val := reflect.ValueOf(tval) + if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { + d, err := time.ParseDuration(val.String()) + if err != nil { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. 
%s", tval, tval, mtype.String(), err) + } + return reflect.ValueOf(d), nil + } + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + if val.Type().Kind() != reflect.Uint64 && val.Convert(reflect.TypeOf(int(1))).Int() < 0 { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Float32, reflect.Float64: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Interface: + if mval1 == nil || mval1.IsNil() { + return reflect.ValueOf(tval), nil + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + case reflect.Slice, reflect.Array: + if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { + return d.valueFromOtherSliceI(mtype, t) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + default: + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + } + } +} + +func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { + var melem *reflect.Value + + if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { + elem := mval1.Elem() + melem = &elem + } + + val, err := d.valueFromToml(mtype.Elem(), tval, melem) + if err != nil { + return reflect.ValueOf(nil), err + } + mval := reflect.New(mtype.Elem()) + mval.Elem().Set(val) + return mval, nil +} + +func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { + var buf bytes.Buffer + fmt.Fprint(&buf, tval) + return callTextUnmarshaler(mval, buf.Bytes()) +} + +func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { + tag := vf.Tag.Get(an.tag) + parse := strings.Split(tag, ",") + var comment string + if c := vf.Tag.Get(an.comment); c != "" { + comment = c + } + commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) + multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) + literal, _ := strconv.ParseBool(vf.Tag.Get(an.literal)) + defaultValue := vf.Tag.Get(tagDefault) + result := tomlOpts{ + name: vf.Name, + nameFromTag: false, + comment: comment, + 
commented: commented, + multiline: multiline, + literal: literal, + include: true, + omitempty: false, + defaultValue: defaultValue, + } + if parse[0] != "" { + if parse[0] == "-" && len(parse) == 1 { + result.include = false + } else { + result.name = strings.Trim(parse[0], " ") + result.nameFromTag = true + } + } + if vf.PkgPath != "" { + result.include = false + } + if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { + result.omitempty = true + } + if vf.Type.Kind() == reflect.Ptr { + result.omitempty = true + } + return result +} + +func isZero(val reflect.Value) bool { + switch val.Type().Kind() { + case reflect.Slice, reflect.Array, reflect.Map: + return val.Len() == 0 + default: + return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) + } +} + +func formatError(err error, pos Position) error { + if err.Error()[0] == '(' { // Error already contains position information + return err + } + return fmt.Errorf("%s: %s", pos, err) +} + +// visitorState keeps track of which keys were unmarshaled. +type visitorState struct { + tree *Tree + path []string + keys map[string]struct{} + active bool +} + +func newVisitorState(tree *Tree) visitorState { + path, result := []string{}, map[string]struct{}{} + insertKeys(path, result, tree) + return visitorState{ + tree: tree, + path: path[:0], + keys: result, + active: true, + } +} + +func (s *visitorState) push(key string) { + if s.active { + s.path = append(s.path, key) + } +} + +func (s *visitorState) pop() { + if s.active { + s.path = s.path[:len(s.path)-1] + } +} + +func (s *visitorState) visit() { + if s.active { + delete(s.keys, strings.Join(s.path, ".")) + } +} + +func (s *visitorState) visitAll() { + if s.active { + for k := range s.keys { + if strings.HasPrefix(k, strings.Join(s.path, ".")) { + delete(s.keys, k) + } + } + } +} + +func (s *visitorState) validate() error { + if !s.active { + return nil + } + undecoded := make([]string, 0, len(s.keys)) + for key := range s.keys { + undecoded = append(undecoded, key) + } + sort.Strings(undecoded) + if len(undecoded) > 0 { + return fmt.Errorf("undecoded keys: %q", undecoded) + } + return nil +} + +func insertKeys(path []string, m map[string]struct{}, tree *Tree) { + for k, v := range tree.values { + switch node := v.(type) { + case []*Tree: + for i, item := range node { + insertKeys(append(path, k, strconv.Itoa(i)), m, item) + } + case *Tree: + insertKeys(append(path, k), m, node) + case *tomlValue: + m[strings.Join(append(path, k), ".")] = struct{}{} + } + } +} diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml new file mode 100644 index 000000000..792b72ed7 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml @@ -0,0 +1,39 @@ +title = "TOML Marshal Testing" + +[basic_lists] + floats = [12.3,45.6,78.9] + bools = [true,false,true] + dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] + ints = [8001,8001,8002] + uints = [5002,5003] + strings = ["One","Two","Three"] + +[[subdocptrs]] + name = "Second" + +[basic_map] + one = "one" + two = "two" + +[subdoc] + + [subdoc.second] + name = "Second" + + [subdoc.first] + name = "First" + +[basic] + uint = 5001 + bool = true + float = 123.4 + float64 = 123.456782132399 + int = 5000 + string = "Bite me" + date = 1979-05-27T07:32:00Z + +[[subdoclist]] + name = "List.First" + +[[subdoclist]] + name = "List.Second" diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml 
b/vendor/github.com/pelletier/go-toml/marshal_test.toml new file mode 100644 index 000000000..ba5e110bf --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml @@ -0,0 +1,39 @@ +title = "TOML Marshal Testing" + +[basic] + bool = true + date = 1979-05-27T07:32:00Z + float = 123.4 + float64 = 123.456782132399 + int = 5000 + string = "Bite me" + uint = 5001 + +[basic_lists] + bools = [true,false,true] + dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] + floats = [12.3,45.6,78.9] + ints = [8001,8001,8002] + strings = ["One","Two","Three"] + uints = [5002,5003] + +[basic_map] + one = "one" + two = "two" + +[subdoc] + + [subdoc.first] + name = "First" + + [subdoc.second] + name = "Second" + +[[subdoclist]] + name = "List.First" + +[[subdoclist]] + name = "List.Second" + +[[subdocptrs]] + name = "Second" diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go new file mode 100644 index 000000000..b3726d0dd --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -0,0 +1,507 @@ +// TOML Parser. + +package toml + +import ( + "errors" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "time" +) + +type tomlParser struct { + flowIdx int + flow []token + tree *Tree + currentTable []string + seenTableKeys []string +} + +type tomlParserStateFn func() tomlParserStateFn + +// Formats and panics an error message based on a token +func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { + panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) +} + +func (p *tomlParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *tomlParser) peek() *token { + if p.flowIdx >= len(p.flow) { + return nil + } + return &p.flow[p.flowIdx] +} + +func (p *tomlParser) assume(typ tokenType) { + tok := p.getToken() + if tok == nil { + p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) + } + if tok.typ != typ { + p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) + } +} + +func (p *tomlParser) getToken() *token { + tok := p.peek() + if tok == nil { + return nil + } + p.flowIdx++ + return tok +} + +func (p *tomlParser) parseStart() tomlParserStateFn { + tok := p.peek() + + // end of stream, parsing is finished + if tok == nil { + return nil + } + + switch tok.typ { + case tokenDoubleLeftBracket: + return p.parseGroupArray + case tokenLeftBracket: + return p.parseGroup + case tokenKey: + return p.parseAssign + case tokenEOF: + return nil + case tokenError: + p.raiseError(tok, "parsing error: %s", tok.String()) + default: + p.raiseError(tok, "unexpected token %s", tok.typ) + } + return nil +} + +func (p *tomlParser) parseGroupArray() tomlParserStateFn { + startToken := p.getToken() // discard the [[ + key := p.getToken() + if key.typ != tokenKeyGroupArray { + p.raiseError(key, "unexpected token %s, was expecting a table array key", key) + } + + // get or create table array element at the indicated part in the path + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid table array key: %s", err) + } + p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries + destTree := p.tree.GetPath(keys) + var array []*Tree + if destTree == nil { + array = make([]*Tree, 0) + } else if target, ok := destTree.([]*Tree); ok && target != nil { + array = destTree.([]*Tree) + } else { + p.raiseError(key, "key %s is already assigned and not of type table array", key) + } + p.currentTable = keys + 
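+	// For example, each of the two headers in
+	//
+	//   [[fruit]]
+	//   name = "apple"
+	//   [[fruit]]
+	//   name = "banana"
+	//
+	// passes through here once, so the "fruit" array grows by one fresh
+	// sub-tree per [[fruit]] occurrence.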
+ // add a new tree to the end of the table array + newTree := newTree() + newTree.position = startToken.Position + array = append(array, newTree) + p.tree.SetPath(p.currentTable, array) + + // remove all keys that were children of this table array + prefix := key.val + "." + found := false + for ii := 0; ii < len(p.seenTableKeys); { + tableKey := p.seenTableKeys[ii] + if strings.HasPrefix(tableKey, prefix) { + p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) + } else { + found = (tableKey == key.val) + ii++ + } + } + + // keep this key name from use by other kinds of assignments + if !found { + p.seenTableKeys = append(p.seenTableKeys, key.val) + } + + // move to next parser state + p.assume(tokenDoubleRightBracket) + return p.parseStart +} + +func (p *tomlParser) parseGroup() tomlParserStateFn { + startToken := p.getToken() // discard the [ + key := p.getToken() + if key.typ != tokenKeyGroup { + p.raiseError(key, "unexpected token %s, was expecting a table key", key) + } + for _, item := range p.seenTableKeys { + if item == key.val { + p.raiseError(key, "duplicated tables") + } + } + + p.seenTableKeys = append(p.seenTableKeys, key.val) + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid table array key: %s", err) + } + if err := p.tree.createSubTree(keys, startToken.Position); err != nil { + p.raiseError(key, "%s", err) + } + destTree := p.tree.GetPath(keys) + if target, ok := destTree.(*Tree); ok && target != nil && target.inline { + p.raiseError(key, "could not re-define exist inline table or its sub-table : %s", + strings.Join(keys, ".")) + } + p.assume(tokenRightBracket) + p.currentTable = keys + return p.parseStart +} + +func (p *tomlParser) parseAssign() tomlParserStateFn { + key := p.getToken() + p.assume(tokenEqual) + + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err.Error()) + } + + value := p.parseRvalue() + var tableKey []string + if len(p.currentTable) > 0 { + tableKey = p.currentTable + } else { + tableKey = []string{} + } + + prefixKey := parsedKey[0 : len(parsedKey)-1] + tableKey = append(tableKey, prefixKey...) 
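+	// For a dotted assignment such as `a.b.c = 1` inside table [x]:
+	// parsedKey is ["a", "b", "c"], prefixKey is ["a", "b"], and tableKey
+	// becomes ["x", "a", "b"], so the value is stored under "c" in the
+	// subtree x.a.b (created below if it does not exist yet).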
+ + // find the table to assign, looking out for arrays of tables + var targetNode *Tree + switch node := p.tree.GetPath(tableKey).(type) { + case []*Tree: + targetNode = node[len(node)-1] + case *Tree: + targetNode = node + case nil: + // create intermediate + if err := p.tree.createSubTree(tableKey, key.Position); err != nil { + p.raiseError(key, "could not create intermediate group: %s", err) + } + targetNode = p.tree.GetPath(tableKey).(*Tree) + default: + p.raiseError(key, "Unknown table type for path: %s", + strings.Join(tableKey, ".")) + } + + if targetNode.inline { + p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s", + strings.Join(tableKey, ".")) + } + + // assign value to the found table + keyVal := parsedKey[len(parsedKey)-1] + localKey := []string{keyVal} + finalKey := append(tableKey, keyVal) + if targetNode.GetPath(localKey) != nil { + p.raiseError(key, "The following key was defined twice: %s", + strings.Join(finalKey, ".")) + } + var toInsert interface{} + + switch value.(type) { + case *Tree, []*Tree: + toInsert = value + default: + toInsert = &tomlValue{value: value, position: key.Position} + } + targetNode.values[keyVal] = toInsert + return p.parseStart +} + +var errInvalidUnderscore = errors.New("invalid use of _ in number") + +func numberContainsInvalidUnderscore(value string) error { + // For large numbers, you may use underscores between digits to enhance + // readability. Each underscore must be surrounded by at least one digit on + // each side. + + hasBefore := false + for idx, r := range value { + if r == '_' { + if !hasBefore || idx+1 >= len(value) { + // can't end with an underscore + return errInvalidUnderscore + } + } + hasBefore = isDigit(r) + } + return nil +} + +var errInvalidUnderscoreHex = errors.New("invalid use of _ in hex number") + +func hexNumberContainsInvalidUnderscore(value string) error { + hasBefore := false + for idx, r := range value { + if r == '_' { + if !hasBefore || idx+1 >= len(value) { + // can't end with an underscore + return errInvalidUnderscoreHex + } + } + hasBefore = isHexDigit(r) + } + return nil +} + +func cleanupNumberToken(value string) string { + cleanedVal := strings.Replace(value, "_", "", -1) + return cleanedVal +} + +func (p *tomlParser) parseRvalue() interface{} { + tok := p.getToken() + if tok == nil || tok.typ == tokenEOF { + p.raiseError(tok, "expecting a value") + } + + switch tok.typ { + case tokenString: + return tok.val + case tokenTrue: + return true + case tokenFalse: + return false + case tokenInf: + if tok.val[0] == '-' { + return math.Inf(-1) + } + return math.Inf(1) + case tokenNan: + return math.NaN() + case tokenInteger: + cleanedVal := cleanupNumberToken(tok.val) + base := 10 + s := cleanedVal + checkInvalidUnderscore := numberContainsInvalidUnderscore + if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { + switch cleanedVal[1] { + case 'x': + checkInvalidUnderscore = hexNumberContainsInvalidUnderscore + base = 16 + case 'o': + base = 8 + case 'b': + base = 2 + default: + panic("invalid base") // the lexer should catch this first + } + s = cleanedVal[2:] + } + + err := checkInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + + var val interface{} + val, err = strconv.ParseInt(s, base, 64) + if err == nil { + return val + } + + if s[0] != '-' { + if val, err = strconv.ParseUint(s, base, 64); err == nil { + return val + } + } + p.raiseError(tok, "%s", err) + case tokenFloat: + err := numberContainsInvalidUnderscore(tok.val) + if err != 
nil { + p.raiseError(tok, "%s", err) + } + cleanedVal := cleanupNumberToken(tok.val) + val, err := strconv.ParseFloat(cleanedVal, 64) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLocalTime: + val, err := ParseLocalTime(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLocalDate: + // a local date may be followed by: + // * nothing: this is a local date + // * a local time: this is a local date-time + + next := p.peek() + if next == nil || next.typ != tokenLocalTime { + val, err := ParseLocalDate(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + } + + localDate := tok + localTime := p.getToken() + + next = p.peek() + if next == nil || next.typ != tokenTimeOffset { + v := localDate.val + "T" + localTime.val + val, err := ParseLocalDateTime(v) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + } + + offset := p.getToken() + + layout := time.RFC3339Nano + v := localDate.val + "T" + localTime.val + offset.val + val, err := time.ParseInLocation(layout, v, time.UTC) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLeftBracket: + return p.parseArray() + case tokenLeftCurlyBrace: + return p.parseInlineTable() + case tokenEqual: + p.raiseError(tok, "cannot have multiple equals for the same key") + case tokenError: + p.raiseError(tok, "%s", tok) + default: + panic(fmt.Errorf("unhandled token: %v", tok)) + } + + return nil +} + +func tokenIsComma(t *token) bool { + return t != nil && t.typ == tokenComma +} + +func (p *tomlParser) parseInlineTable() *Tree { + tree := newTree() + var previous *token +Loop: + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated inline table") + } + switch follow.typ { + case tokenRightCurlyBrace: + p.getToken() + break Loop + case tokenKey, tokenInteger, tokenString: + if !tokenIsComma(previous) && previous != nil { + p.raiseError(follow, "comma expected between fields in inline table") + } + key := p.getToken() + p.assume(tokenEqual) + + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err) + } + + value := p.parseRvalue() + tree.SetPath(parsedKey, value) + case tokenComma: + if tokenIsComma(previous) { + p.raiseError(follow, "need field between two commas in inline table") + } + p.getToken() + default: + p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) + } + previous = follow + } + if tokenIsComma(previous) { + p.raiseError(previous, "trailing comma at the end of inline table") + } + tree.inline = true + return tree +} + +func (p *tomlParser) parseArray() interface{} { + var array []interface{} + arrayType := reflect.TypeOf(newTree()) + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ == tokenRightBracket { + p.getToken() + break + } + val := p.parseRvalue() + if reflect.TypeOf(val) != arrayType { + arrayType = nil + } + array = append(array, val) + follow = p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ != tokenRightBracket && follow.typ != tokenComma { + p.raiseError(follow, "missing comma") + } + if follow.typ == tokenComma { + p.getToken() + } + } + + // if the array is a mixed-type array or its length is 0, + // don't convert it to a table array + if len(array) <= 0 { + arrayType = nil + } + // An array of Trees is actually an 
array of inline + // tables, which is a shorthand for a table array. If the + // array was not converted from []interface{} to []*Tree, + // the two notations would not be equivalent. + if arrayType == reflect.TypeOf(newTree()) { + tomlArray := make([]*Tree, len(array)) + for i, v := range array { + tomlArray[i] = v.(*Tree) + } + return tomlArray + } + return array +} + +func parseToml(flow []token) *Tree { + result := newTree() + result.position = Position{1, 1} + parser := &tomlParser{ + flowIdx: 0, + flow: flow, + tree: result, + currentTable: make([]string, 0), + seenTableKeys: make([]string, 0), + } + parser.run() + return result +} diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go new file mode 100644 index 000000000..c17bff87b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/position.go @@ -0,0 +1,29 @@ +// Position support for go-toml + +package toml + +import ( + "fmt" +) + +// Position of a document element within a TOML document. +// +// Line and Col are both 1-indexed positions for the element's line number and +// column number, respectively. Values of zero or less will cause Invalid() + to return true. +type Position struct { + Line int // line within the document + Col int // column within the line +} + +// String representation of the position. +// Displays 1-indexed line and column numbers. +func (p Position) String() string { + return fmt.Sprintf("(%d, %d)", p.Line, p.Col) +} + +// Invalid reports whether the position is invalid, i.e. has a zero or +// negative Line or Col. +func (p Position) Invalid() bool { + return p.Line <= 0 || p.Col <= 0 +} diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go new file mode 100644 index 000000000..b437fdd3b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/token.go @@ -0,0 +1,136 @@ +package toml + +import "fmt" + +// Define tokens +type tokenType int + +const ( + eof = -(iota + 1) +) + +const ( + tokenError tokenType = iota + tokenEOF + tokenComment + tokenKey + tokenString + tokenInteger + tokenTrue + tokenFalse + tokenFloat + tokenInf + tokenNan + tokenEqual + tokenLeftBracket + tokenRightBracket + tokenLeftCurlyBrace + tokenRightCurlyBrace + tokenLeftParen + tokenRightParen + tokenDoubleLeftBracket + tokenDoubleRightBracket + tokenLocalDate + tokenLocalTime + tokenTimeOffset + tokenKeyGroup + tokenKeyGroupArray + tokenComma + tokenColon + tokenDollar + tokenStar + tokenQuestion + tokenDot + tokenDotDot + tokenEOL +) + +var tokenTypeNames = []string{ + "Error", + "EOF", + "Comment", + "Key", + "String", + "Integer", + "True", + "False", + "Float", + "Inf", + "NaN", + "=", + "[", + "]", + "{", + "}", + "(", + ")", + "]]", + "[[", + "LocalDate", + "LocalTime", + "TimeOffset", + "KeyGroup", + "KeyGroupArray", + ",", + ":", + "$", + "*", + "?", + ".", + "..", + "EOL", +} + +type token struct { + Position + typ tokenType + val string +} + +func (tt tokenType) String() string { + idx := int(tt) + if idx < len(tokenTypeNames) { + return tokenTypeNames[idx] + } + return "Unknown" +} + +func (t token) String() string { + switch t.typ { + case tokenEOF: + return "EOF" + case tokenError: + return t.val + } + + return fmt.Sprintf("%q", t.val) +} + +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +func isAlphanumeric(r rune) bool { + return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' +} + +func isKeyChar(r rune) bool { + // Keys start with the first character that isn't whitespace or [ and
end + // with the last non-whitespace character before the equals sign. Keys + // cannot contain a # character." + return !(r == '\r' || r == '\n' || r == eof || r == '=') +} + +func isKeyStartChar(r rune) bool { + return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') +} + +func isDigit(r rune) bool { + return '0' <= r && r <= '9' +} + +func isHexDigit(r rune) bool { + return isDigit(r) || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go new file mode 100644 index 000000000..5541b941f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -0,0 +1,533 @@ +package toml + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" +) + +type tomlValue struct { + value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list + comment string + commented bool + multiline bool + literal bool + position Position +} + +// Tree is the result of the parsing of a TOML file. +type Tree struct { + values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree + comment string + commented bool + inline bool + position Position +} + +func newTree() *Tree { + return newTreeWithPosition(Position{}) +} + +func newTreeWithPosition(pos Position) *Tree { + return &Tree{ + values: make(map[string]interface{}), + position: pos, + } +} + +// TreeFromMap initializes a new Tree object using the given map. +func TreeFromMap(m map[string]interface{}) (*Tree, error) { + result, err := toTree(m) + if err != nil { + return nil, err + } + return result.(*Tree), nil +} + +// Position returns the position of the tree. +func (t *Tree) Position() Position { + return t.position +} + +// Has returns a boolean indicating if the given key exists. +func (t *Tree) Has(key string) bool { + if key == "" { + return false + } + return t.HasPath(strings.Split(key, ".")) +} + +// HasPath returns true if the given path of keys exists, false otherwise. +func (t *Tree) HasPath(keys []string) bool { + return t.GetPath(keys) != nil +} + +// Keys returns the keys of the toplevel tree (does not recurse). +func (t *Tree) Keys() []string { + keys := make([]string, len(t.values)) + i := 0 + for k := range t.values { + keys[i] = k + i++ + } + return keys +} + +// Get the value at key in the Tree. +// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. +// If you need to retrieve non-bare keys, use GetPath. +// Returns nil if the path does not exist in the tree. +// If keys is of length zero, the current tree is returned. +func (t *Tree) Get(key string) interface{} { + if key == "" { + return t + } + return t.GetPath(strings.Split(key, ".")) +} + +// GetPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. 
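+//
+// For example, for a tree parsed from:
+//
+//	[server]
+//	port = 8080
+//
+// t.GetPath([]string{"server", "port"}) returns int64(8080).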
+func (t *Tree) GetPath(keys []string) interface{} { + if len(keys) == 0 { + return t + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return nil + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return nil + } + subtree = node[len(node)-1] + default: + return nil // cannot navigate through other node types + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.value + default: + return node + } +} + +// GetArray returns the value at key in the Tree. +// It returns []string, []int64, etc type if key has homogeneous lists +// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. +// Returns nil if the path does not exist in the tree. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetArray(key string) interface{} { + if key == "" { + return t + } + return t.GetArrayPath(strings.Split(key, ".")) +} + +// GetArrayPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetArrayPath(keys []string) interface{} { + if len(keys) == 0 { + return t + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return nil + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return nil + } + subtree = node[len(node)-1] + default: + return nil // cannot navigate through other node types + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + switch n := node.value.(type) { + case []interface{}: + return getArray(n) + default: + return node.value + } + default: + return node + } +} + +// if homogeneous array, then return slice type object over []interface{} +func getArray(n []interface{}) interface{} { + var s []string + var i64 []int64 + var f64 []float64 + var bl []bool + for _, value := range n { + switch v := value.(type) { + case string: + s = append(s, v) + case int64: + i64 = append(i64, v) + case float64: + f64 = append(f64, v) + case bool: + bl = append(bl, v) + default: + return n + } + } + if len(s) == len(n) { + return s + } else if len(i64) == len(n) { + return i64 + } else if len(f64) == len(n) { + return f64 + } else if len(bl) == len(n) { + return bl + } + return n +} + +// GetPosition returns the position of the given key. +func (t *Tree) GetPosition(key string) Position { + if key == "" { + return t.position + } + return t.GetPositionPath(strings.Split(key, ".")) +} + +// SetPositionPath sets the position of element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree position is set. 
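+// For example, t.SetPositionPath([]string{"a", "b"}, Position{Line: 3, Col: 1})
+// records line 3, column 1 as the position of key a.b; if the path does not
+// exist, the call is a no-op.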
+func (t *Tree) SetPositionPath(keys []string, pos Position) { + if len(keys) == 0 { + t.position = pos + return + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return + } + subtree = node[len(node)-1] + default: + return + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + node.position = pos + return + case *Tree: + node.position = pos + return + case []*Tree: + // go to most recent element + if len(node) == 0 { + return + } + node[len(node)-1].position = pos + return + } +} + +// GetPositionPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetPositionPath(keys []string) Position { + if len(keys) == 0 { + return t.position + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return Position{0, 0} + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + subtree = node[len(node)-1] + default: + return Position{0, 0} + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.position + case *Tree: + return node.position + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + return node[len(node)-1].position + default: + return Position{0, 0} + } +} + +// GetDefault works like Get but with a default value +func (t *Tree) GetDefault(key string, def interface{}) interface{} { + val := t.Get(key) + if val == nil { + return def + } + return val +} + +// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. +// The default values within the struct are valid default options. +type SetOptions struct { + Comment string + Commented bool + Multiline bool + Literal bool +} + +// SetWithOptions is the same as Set, but allows you to provide formatting +// instructions to the key, that will be used by Marshal(). +func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { + t.SetPathWithOptions(strings.Split(key, "."), opts, value) +} + +// SetPathWithOptions is the same as SetPath, but allows you to provide +// formatting instructions to the key, that will be reused by Marshal(). 
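+// For example:
+//
+//	t.SetPathWithOptions([]string{"title"},
+//		SetOptions{Comment: "the document title"}, "TOML Example")
+//
+// stores the value together with a comment that Marshal() will emit as
+// "# the document title" above the key.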
+func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { + subtree := t + for i, intermediateKey := range keys[:len(keys)-1] { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) + subtree.values[intermediateKey] = nextTree // add new element here + } + switch node := nextTree.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + // create element if it does not exist + node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) + subtree.values[intermediateKey] = node + } + subtree = node[len(node)-1] + } + } + + var toInsert interface{} + + switch v := value.(type) { + case *Tree: + v.comment = opts.Comment + v.commented = opts.Commented + toInsert = value + case []*Tree: + for i := range v { + v[i].commented = opts.Commented + } + toInsert = value + case *tomlValue: + v.comment = opts.Comment + v.commented = opts.Commented + v.multiline = opts.Multiline + v.literal = opts.Literal + toInsert = v + default: + toInsert = &tomlValue{value: value, + comment: opts.Comment, + commented: opts.Commented, + multiline: opts.Multiline, + literal: opts.Literal, + position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} + } + + subtree.values[keys[len(keys)-1]] = toInsert +} + +// Set an element in the tree. +// Key is a dot-separated path (e.g. a.b.c). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) Set(key string, value interface{}) { + t.SetWithComment(key, "", false, value) +} + +// SetWithComment is the same as Set, but allows you to provide comment +// information to the key, that will be reused by Marshal(). +func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { + t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) +} + +// SetPath sets an element in the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) SetPath(keys []string, value interface{}) { + t.SetPathWithComment(keys, "", false, value) +} + +// SetPathWithComment is the same as SetPath, but allows you to provide comment +// information to the key, that will be reused by Marshal(). +func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { + t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) +} + +// Delete removes a key from the tree. +// Key is a dot-separated path (e.g. a.b.c). +func (t *Tree) Delete(key string) error { + keys, err := parseKey(key) + if err != nil { + return err + } + return t.DeletePath(keys) +} + +// DeletePath removes a key from the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). +func (t *Tree) DeletePath(keys []string) error { + keyLen := len(keys) + if keyLen == 1 { + delete(t.values, keys[0]) + return nil + } + tree := t.GetPath(keys[:keyLen-1]) + item := keys[keyLen-1] + switch node := tree.(type) { + case *Tree: + delete(node.values, item) + return nil + } + return errors.New("no such key to delete") +} + +// createSubTree takes a tree and a key and create the necessary intermediate +// subtrees to create a subtree at that point. In-place. +// +// e.g. 
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] +// and tree[a][b][c] +// +// Returns nil on success, error object on failure +func (t *Tree) createSubTree(keys []string, pos Position) error { + subtree := t + for i, intermediateKey := range keys { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) + tree.position = pos + tree.inline = subtree.inline + subtree.values[intermediateKey] = tree + nextTree = tree + } + + switch node := nextTree.(type) { + case []*Tree: + subtree = node[len(node)-1] + case *Tree: + subtree = node + default: + return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", + strings.Join(keys, "."), intermediateKey, nextTree, nextTree) + } + } + return nil +} + +// LoadBytes creates a Tree from a []byte. +func LoadBytes(b []byte) (tree *Tree, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = fmt.Errorf("%s", r) + } + }() + + if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { + b = b[4:] + } else if len(b) >= 3 && hasUTF8BOM3(b) { + b = b[3:] + } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { + b = b[2:] + } + + tree = parseToml(lexToml(b)) + return +} + +func hasUTF16BigEndianBOM2(b []byte) bool { + return b[0] == 0xFE && b[1] == 0xFF +} + +func hasUTF16LittleEndianBOM2(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE +} + +func hasUTF8BOM3(b []byte) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +func hasUTF32BigEndianBOM4(b []byte) bool { + return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF +} + +func hasUTF32LittleEndianBOM4(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 +} + +// LoadReader creates a Tree from any io.Reader. +func LoadReader(reader io.Reader) (tree *Tree, err error) { + inputBytes, err := ioutil.ReadAll(reader) + if err != nil { + return + } + tree, err = LoadBytes(inputBytes) + return +} + +// Load creates a Tree from a string. +func Load(content string) (tree *Tree, err error) { + return LoadBytes([]byte(content)) +} + +// LoadFile creates a Tree from a file. +func LoadFile(path string) (tree *Tree, err error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + return LoadReader(file) +} diff --git a/vendor/github.com/pelletier/go-toml/tomlpub.go b/vendor/github.com/pelletier/go-toml/tomlpub.go new file mode 100644 index 000000000..4136b4625 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomlpub.go @@ -0,0 +1,71 @@ +package toml + +// PubTOMLValue wrapping tomlValue in order to access all properties from outside. 
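+// Because it is a type alias rather than a distinct type, values of the two
+// names are interchangeable; the alias only adds exported accessors for the
+// unexported fields.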
+type PubTOMLValue = tomlValue + +func (ptv *PubTOMLValue) Value() interface{} { + return ptv.value +} +func (ptv *PubTOMLValue) Comment() string { + return ptv.comment +} +func (ptv *PubTOMLValue) Commented() bool { + return ptv.commented +} +func (ptv *PubTOMLValue) Multiline() bool { + return ptv.multiline +} +func (ptv *PubTOMLValue) Position() Position { + return ptv.position +} + +func (ptv *PubTOMLValue) SetValue(v interface{}) { + ptv.value = v +} +func (ptv *PubTOMLValue) SetComment(s string) { + ptv.comment = s +} +func (ptv *PubTOMLValue) SetCommented(c bool) { + ptv.commented = c +} +func (ptv *PubTOMLValue) SetMultiline(m bool) { + ptv.multiline = m +} +func (ptv *PubTOMLValue) SetPosition(p Position) { + ptv.position = p +} + +// PubTree wrapping Tree in order to access all properties from outside. +type PubTree = Tree + +func (pt *PubTree) Values() map[string]interface{} { + return pt.values +} + +func (pt *PubTree) Comment() string { + return pt.comment +} + +func (pt *PubTree) Commented() bool { + return pt.commented +} + +func (pt *PubTree) Inline() bool { + return pt.inline +} + +func (pt *PubTree) SetValues(v map[string]interface{}) { + pt.values = v +} + +func (pt *PubTree) SetComment(c string) { + pt.comment = c +} + +func (pt *PubTree) SetCommented(c bool) { + pt.commented = c +} + +func (pt *PubTree) SetInline(i bool) { + pt.inline = i +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go new file mode 100644 index 000000000..80353500a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go @@ -0,0 +1,155 @@ +package toml + +import ( + "fmt" + "reflect" + "time" +) + +var kindToType = [reflect.String + 1]reflect.Type{ + reflect.Bool: reflect.TypeOf(true), + reflect.String: reflect.TypeOf(""), + reflect.Float32: reflect.TypeOf(float64(1)), + reflect.Float64: reflect.TypeOf(float64(1)), + reflect.Int: reflect.TypeOf(int64(1)), + reflect.Int8: reflect.TypeOf(int64(1)), + reflect.Int16: reflect.TypeOf(int64(1)), + reflect.Int32: reflect.TypeOf(int64(1)), + reflect.Int64: reflect.TypeOf(int64(1)), + reflect.Uint: reflect.TypeOf(uint64(1)), + reflect.Uint8: reflect.TypeOf(uint64(1)), + reflect.Uint16: reflect.TypeOf(uint64(1)), + reflect.Uint32: reflect.TypeOf(uint64(1)), + reflect.Uint64: reflect.TypeOf(uint64(1)), +} + +// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. 
+// supported values: +// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 +func typeFor(k reflect.Kind) reflect.Type { + if k > 0 && int(k) < len(kindToType) { + return kindToType[k] + } + return nil +} + +func simpleValueCoercion(object interface{}) (interface{}, error) { + switch original := object.(type) { + case string, bool, int64, uint64, float64, time.Time: + return original, nil + case int: + return int64(original), nil + case int8: + return int64(original), nil + case int16: + return int64(original), nil + case int32: + return int64(original), nil + case uint: + return uint64(original), nil + case uint8: + return uint64(original), nil + case uint16: + return uint64(original), nil + case uint32: + return uint64(original), nil + case float32: + return float64(original), nil + case fmt.Stringer: + return original.String(), nil + case []interface{}: + value := reflect.ValueOf(original) + length := value.Len() + arrayValue := reflect.MakeSlice(value.Type(), 0, length) + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return arrayValue.Interface(), nil + default: + return nil, fmt.Errorf("cannot convert type %T to Tree", object) + } +} + +func sliceToTree(object interface{}) (interface{}, error) { + // arrays are a bit tricky, since they can represent either a + // collection of simple values, which is represented by one + // *tomlValue, or an array of tables, which is represented by an + // array of *Tree. + + // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice + value := reflect.ValueOf(object) + insideType := value.Type().Elem() + length := value.Len() + if length > 0 { + insideType = reflect.ValueOf(value.Index(0).Interface()).Type() + } + if insideType.Kind() == reflect.Map { + // this is considered as an array of tables + tablesArray := make([]*Tree, 0, length) + for i := 0; i < length; i++ { + table := value.Index(i) + tree, err := toTree(table.Interface()) + if err != nil { + return nil, err + } + tablesArray = append(tablesArray, tree.(*Tree)) + } + return tablesArray, nil + } + + sliceType := typeFor(insideType.Kind()) + if sliceType == nil { + sliceType = insideType + } + + arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) + + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil +} + +func toTree(object interface{}) (interface{}, error) { + value := reflect.ValueOf(object) + + if value.Kind() == reflect.Map { + values := map[string]interface{}{} + keys := value.MapKeys() + for _, key := range keys { + if key.Kind() != reflect.String { + if _, ok := key.Interface().(string); !ok { + return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) + } + } + + v := value.MapIndex(key) + newValue, err := toTree(v.Interface()) + if err != nil { + return nil, err + } + values[key.String()] = newValue + } + return &Tree{values: values, position: Position{}}, nil + } + + if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { + return sliceToTree(object) + } + + simpleValue, 
err := simpleValueCoercion(object) + if err != nil { + return nil, err + } + return &tomlValue{value: simpleValue, position: Position{}}, nil +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go new file mode 100644 index 000000000..c9afbdab7 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -0,0 +1,552 @@ +package toml + +import ( + "bytes" + "fmt" + "io" + "math" + "math/big" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type valueComplexity int + +const ( + valueSimple valueComplexity = iota + 1 + valueComplex +) + +type sortNode struct { + key string + complexity valueComplexity +} + +// Encodes a string to a TOML-compliant multi-line string value +// This function is a clone of the existing encodeTomlString function, except that whitespace characters +// are preserved. Quotation marks and backslashes are also not escaped. +func encodeMultilineTomlString(value string, commented string) string { + var b bytes.Buffer + adjacentQuoteCount := 0 + + b.WriteString(commented) + for i, rr := range value { + if rr != '"' { + adjacentQuoteCount = 0 + } else { + adjacentQuoteCount++ + } + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + b.WriteString("\t") + case '\n': + b.WriteString("\n" + commented) + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString("\r") + case '"': + if adjacentQuoteCount >= 3 || i == len(value)-1 { + adjacentQuoteCount = 0 + b.WriteString(`\"`) + } else { + b.WriteString(`"`) + } + case '\\': + b.WriteString(`\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +// Encodes a string to a TOML-compliant string value +func encodeTomlString(value string) string { + var b bytes.Buffer + + for _, rr := range value { + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + b.WriteString(`\t`) + case '\n': + b.WriteString(`\n`) + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString(`\r`) + case '"': + b.WriteString(`\"`) + case '\\': + b.WriteString(`\\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +func tomlTreeStringRepresentation(t *Tree, ord MarshalOrder) (string, error) { + var orderedVals []sortNode + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + var values []string + for _, node := range orderedVals { + k := node.key + v := t.values[k] + + repr, err := tomlValueStringRepresentation(v, "", "", ord, false) + if err != nil { + return "", err + } + values = append(values, quoteKeyIfNeeded(k)+" = "+repr) + } + return "{ " + strings.Join(values, ", ") + " }", nil +} + +func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { + // this interface check is added to dereference the change made in the writeTo function. + // That change was made to allow this function to see formatting options. 
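+	// Concretely: the writer may pass the whole *tomlValue (not just its
+	// inner value) so that per-key options such as multiline and literal
+	// are visible here; unwrap it, or fall back to a zero tomlValue.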
+ tv, ok := v.(*tomlValue) + if ok { + v = tv.value + } else { + tv = &tomlValue{} + } + + switch value := v.(type) { + case uint64: + return strconv.FormatUint(value, 10), nil + case int64: + return strconv.FormatInt(value, 10), nil + case float64: + // Default bit length is full 64 + bits := 64 + // Float panics if nan is used + if !math.IsNaN(value) { + // if 32 bit accuracy is enough to exactly show, use 32 + _, acc := big.NewFloat(value).Float32() + if acc == big.Exact { + bits = 32 + } + } + if math.Trunc(value) == value { + return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil + } + return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil + case string: + if tv.multiline { + if tv.literal { + b := strings.Builder{} + b.WriteString("'''\n") + b.Write([]byte(value)) + b.WriteString("\n'''") + return b.String(), nil + } else { + return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil + } + } + return "\"" + encodeTomlString(value) + "\"", nil + case []byte: + b, _ := v.([]byte) + return string(b), nil + case bool: + if value { + return "true", nil + } + return "false", nil + case time.Time: + return value.Format(time.RFC3339), nil + case LocalDate: + return value.String(), nil + case LocalDateTime: + return value.String(), nil + case LocalTime: + return value.String(), nil + case *Tree: + return tomlTreeStringRepresentation(value, ord) + case nil: + return "", nil + } + + rv := reflect.ValueOf(v) + + if rv.Kind() == reflect.Slice { + var values []string + for i := 0; i < rv.Len(); i++ { + item := rv.Index(i).Interface() + itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) + if err != nil { + return "", err + } + values = append(values, itemRepr) + } + if arraysOneElementPerLine && len(values) > 1 { + stringBuffer := bytes.Buffer{} + valueIndent := indent + ` ` // TODO: move that to a shared encoder state + + stringBuffer.WriteString("[\n") + + for _, value := range values { + stringBuffer.WriteString(valueIndent) + stringBuffer.WriteString(commented + value) + stringBuffer.WriteString(`,`) + stringBuffer.WriteString("\n") + } + + stringBuffer.WriteString(indent + commented + "]") + + return stringBuffer.String(), nil + } + return "[" + strings.Join(values, ", ") + "]", nil + } + return "", fmt.Errorf("unsupported value type %T: %v", v, v) +} + +func getTreeArrayLine(trees []*Tree) (line int) { + // Prevent returning 0 for empty trees + line = int(^uint(0) >> 1) + // get lowest line number >= 0 + for _, tv := range trees { + if tv.position.Line < line || line == 0 { + line = tv.position.Line + } + } + return +} + +func sortByLines(t *Tree) (vals []sortNode) { + var ( + line int + lines []int + tv *Tree + tom *tomlValue + node sortNode + ) + vals = make([]sortNode, 0) + m := make(map[int]sortNode) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree: + tv = v.(*Tree) + line = tv.position.Line + node = sortNode{key: k, complexity: valueComplex} + case []*Tree: + line = getTreeArrayLine(v.([]*Tree)) + node = sortNode{key: k, complexity: valueComplex} + default: + tom = v.(*tomlValue) + line = tom.position.Line + node = sortNode{key: k, complexity: valueSimple} + } + lines = append(lines, line) + vals = append(vals, node) + m[line] = node + } + sort.Ints(lines) + + for i, line := range lines { + vals[i] = m[line] + } + + return vals +} + +func sortAlphabetical(t *Tree) (vals []sortNode) { + var ( + node sortNode + simpVals []string + compVals []string + ) + 
vals = make([]sortNode, 0) + m := make(map[string]sortNode) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree, []*Tree: + node = sortNode{key: k, complexity: valueComplex} + compVals = append(compVals, node.key) + default: + node = sortNode{key: k, complexity: valueSimple} + simpVals = append(simpVals, node.key) + } + vals = append(vals, node) + m[node.key] = node + } + + // Simples first to match previous implementation + sort.Strings(simpVals) + i := 0 + for _, key := range simpVals { + vals[i] = m[key] + i++ + } + + sort.Strings(compVals) + for _, key := range compVals { + vals[i] = m[key] + i++ + } + + return vals +} + +func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { + return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false, false) +} + +func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord MarshalOrder, indentString string, compactComments, parentCommented bool) (int64, error) { + var orderedVals []sortNode + + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + for _, node := range orderedVals { + switch node.complexity { + case valueComplex: + k := node.key + v := t.values[k] + + combinedKey := quoteKeyIfNeeded(k) + if keyspace != "" { + combinedKey = keyspace + "." + combinedKey + } + + switch node := v.(type) { + // node has to be of those two types given how keys are sorted above + case *Tree: + tv, ok := t.values[k].(*Tree) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + if tv.comment != "" { + comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + var commented string + if parentCommented || t.commented || tv.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || tv.commented) + if err != nil { + return bytesCount, err + } + case []*Tree: + for _, subTree := range node { + var commented string + if parentCommented || t.commented || subTree.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + + bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || subTree.commented) + if err != nil { + return bytesCount, err + } + } + } + default: // Simple + k := node.key + v, ok := t.values[k].(*tomlValue) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + + var commented string + if parentCommented || t.commented || v.commented { + commented = "# " + } + repr, err := tomlValueStringRepresentation(v, 
commented, indent, ord, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + + if v.comment != "" { + comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + if !compactComments { + writtenBytesCountComment, errc := writeStrings(w, "\n") + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + writtenBytesCountComment, errc := writeStrings(w, indent, start, comment, "\n") + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + quotedKey := quoteKeyIfNeeded(k) + writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + } + } + + return bytesCount, nil +} + +// quote a key if it does not fit the bare key format (A-Za-z0-9_-) +// quoted keys use the same rules as strings +func quoteKeyIfNeeded(k string) string { + // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain + // keys that have already been quoted. + // not an ideal situation, but good enough of a stop gap. + if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { + return k + } + isBare := true + for _, r := range k { + if !isValidBareChar(r) { + isBare = false + break + } + } + if isBare { + return k + } + return quoteKey(k) +} + +func quoteKey(k string) string { + return "\"" + encodeTomlString(k) + "\"" +} + +func writeStrings(w io.Writer, s ...string) (int, error) { + var n int + for i := range s { + b, err := io.WriteString(w, s[i]) + n += b + if err != nil { + return n, err + } + } + return n, nil +} + +// WriteTo encodes the Tree as TOML and writes it to the writer w. +// Returns the number of bytes written in case of success, or an error if anything happened. +func (t *Tree) WriteTo(w io.Writer) (int64, error) { + return t.writeTo(w, "", "", 0, false) +} + +// ToTomlString generates a human-readable representation of the current tree. +// Output spans multiple lines, and is suitable for ingest by a TOML parser. +// If the conversion cannot be performed, ToTomlString returns a non-nil error. +func (t *Tree) ToTomlString() (string, error) { + b, err := t.Marshal() + if err != nil { + return "", err + } + return string(b), nil +} + +// String generates a human-readable representation of the current tree. +// Alias of ToTomlString. Present to implement the fmt.Stringer interface. +func (t *Tree) String() string { + result, _ := t.ToTomlString() + return result +} + +// ToMap recursively generates a representation of the tree using Go built-in structures.
+// The following types are used: +// +// * bool +// * float64 +// * int64 +// * string +// * uint64 +// * time.Time +// * map[string]interface{} (where interface{} is any of this list) +// * []interface{} (where interface{} is any of this list) +func (t *Tree) ToMap() map[string]interface{} { + result := map[string]interface{}{} + + for k, v := range t.values { + switch node := v.(type) { + case []*Tree: + var array []interface{} + for _, item := range node { + array = append(array, item.ToMap()) + } + result[k] = array + case *Tree: + result[k] = node.ToMap() + case *tomlValue: + result[k] = tomlValueToGo(node.value) + } + } + return result +} + +func tomlValueToGo(v interface{}) interface{} { + if tree, ok := v.(*Tree); ok { + return tree.ToMap() + } + + rv := reflect.ValueOf(v) + + if rv.Kind() != reflect.Slice { + return v + } + values := make([]interface{}, rv.Len()) + for i := 0; i < rv.Len(); i++ { + item := rv.Index(i).Interface() + values[i] = tomlValueToGo(item) + } + return values +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_writepub.go b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go new file mode 100644 index 000000000..fa326308c --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go @@ -0,0 +1,6 @@ +package toml + +// ValueStringRepresentation transforms an interface{} value into its toml string representation. +func ValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { + return tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) +} diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore new file mode 100644 index 000000000..c7b459e4d --- /dev/null +++ b/vendor/github.com/spf13/cobra/.gitignore @@ -0,0 +1,39 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore +# swap +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +# session +Session.vim +# temporary +.netrwhist +*~ +# auto-generated tag files +tags + +*.exe +cobra.test +bin + +.idea/ +*.iml diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml new file mode 100644 index 000000000..2578d94b5 --- /dev/null +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -0,0 +1,62 @@ +# Copyright 2013-2023 The Cobra Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +run: + deadline: 5m + +linters: + disable-all: true + enable: + #- bodyclose + - deadcode + #- depguard + #- dogsled + #- dupl + - errcheck + #- exhaustive + #- funlen + - gas + #- gochecknoinits + - goconst + #- gocritic + #- gocyclo + #- gofmt + - goimports + - golint + #- gomnd + #- goprintffuncname + #- gosec + #- gosimple + - govet + - ineffassign + - interfacer + #- lll + - maligned + - megacheck + #- misspell + #- nakedret + #- noctx + #- nolintlint + #- rowserrcheck + #- scopelint + #- staticcheck + - structcheck + #- stylecheck + #- typecheck + - unconvert + #- unparam + #- unused + - varcheck + #- whitespace + fast: false diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap new file mode 100644 index 000000000..94ec53068 --- /dev/null +++ b/vendor/github.com/spf13/cobra/.mailmap @@ -0,0 +1,3 @@ +Steve Francia +Bjørn Erik Pedersen +Fabiano Franz diff --git a/vendor/github.com/spf13/cobra/CONDUCT.md b/vendor/github.com/spf13/cobra/CONDUCT.md new file mode 100644 index 000000000..9d16f88fd --- /dev/null +++ b/vendor/github.com/spf13/cobra/CONDUCT.md @@ -0,0 +1,37 @@ +## Cobra User Contract + +### Versioning +Cobra will follow a steady release cadence. Non-breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the Master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release. + +### Backward Compatibility +We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released. + +### Deprecation +Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the chance of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra Slack channel](https://gophers.slack.com/archives/CD3LP1199) and an issue on GitHub. + +### CVE +Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed with which these patches reach a release is up to the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one. + +### Communication +Cobra maintainers will use GitHub issues and the [#cobra Slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors. + +### Breaking Changes +Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra. + +There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version. + +Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These versions have consumers who expect the APIs, behaviors, etc., to remain stable during the lifetime of the patch stream for the minor release. + +Examples of breaking changes include: +- Removing or renaming an exported constant, variable, type, or function.
+- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper` etc.
+    - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing.
+
+There may, at times, be a need for exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging.
+
+### CI Testing
+Maintainers will ensure the Cobra test suite utilizes the currently supported versions of Go.
+
+### Disclaimer
+Changes to this document and the contents therein are at the discretion of the maintainers.
+None of the contents of this document are legally binding in any way to the maintainers or the users.
diff --git a/vendor/github.com/spf13/cobra/CONTRIBUTING.md b/vendor/github.com/spf13/cobra/CONTRIBUTING.md
new file mode 100644
index 000000000..6f356e6a8
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CONTRIBUTING.md
@@ -0,0 +1,50 @@
+# Contributing to Cobra
+
+Thank you so much for contributing to Cobra. We appreciate your time and help.
+Here are some guidelines to help you get started.
+
+## Code of Conduct
+
+Be kind and respectful to the members of the community. Take time to educate
+others who are seeking help. Harassment of any kind will not be tolerated.
+
+## Questions
+
+If you have questions regarding Cobra, feel free to ask in the community
+[#cobra Slack channel][cobra-slack].
+
+## Filing a bug or feature
+
+1. Before filing an issue, please check the existing issues to see if a
+   similar one was already opened. If there is one already opened, feel free
+   to comment on it.
+1. If you believe you've found a bug, please provide detailed steps of
+   reproduction, the version of Cobra and anything else you believe will be
+   useful to help troubleshoot it (e.g. OS environment, environment variables,
+   etc.). Also state the current behavior vs. the expected behavior.
+1. If you'd like to see a feature or an enhancement please open an issue with
+   a clear title and description of what the feature is and why it would be
+   beneficial to the project and its users.
+
+## Submitting changes
+
+1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to
+   sign a CLA. Please sign the CLA :slightly_smiling_face:
+1. Tests: If you are submitting code, please ensure you have adequate tests
+   for the feature. Tests can be run via `go test ./...` or `make test`.
+1. Since this is a Go project, make sure the new code is properly formatted to
+   keep the codebase consistent. Run `make all`.
+
+### Quick steps to contribute
+
+1. Fork the project.
+1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
+1. Create your feature branch (`git checkout -b my-new-feature`)
+1. Make changes and run tests (`make test`)
+1. Add them to staging (`git add .`)
+1. Commit your changes (`git commit -m 'Add some feature'`)
+1. Push to the branch (`git push origin my-new-feature`)
+1. Create a new pull request
+
+
+[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199
diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt
new file mode 100644
index 000000000..298f0e266
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/LICENSE.txt
@@ -0,0 +1,174 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/cobra/MAINTAINERS b/vendor/github.com/spf13/cobra/MAINTAINERS new file mode 100644 index 000000000..4c5ac3dd9 --- /dev/null +++ b/vendor/github.com/spf13/cobra/MAINTAINERS @@ -0,0 +1,13 @@ +maintainers: +- spf13 +- johnSchnake +- jpmcb +- marckhouzam +inactive: +- anthonyfok +- bep +- bogem +- broady +- eparis +- jharshman +- wfernandes diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile new file mode 100644 index 000000000..0da8d7aa0 --- /dev/null +++ b/vendor/github.com/spf13/cobra/Makefile @@ -0,0 +1,35 @@ +BIN="./bin" +SRC=$(shell find . -name "*.go") + +ifeq (, $(shell which golangci-lint)) +$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") +endif + +.PHONY: fmt lint test install_deps clean + +default: all + +all: fmt test + +fmt: + $(info ******************** checking formatting ********************) + @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1) + +lint: + $(info ******************** running lint tools ********************) + golangci-lint run -v + +test: install_deps + $(info ******************** running tests ********************) + go test -v ./... 
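+
+# NOTE: the 'richtest' target below shells out to richgo (github.com/kyoh86/richgo),
+# which install_deps does not fetch; it is assumed to already be on PATH.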
+
+richtest: install_deps
+	$(info ******************** running tests with kyoh86/richgo ********************)
+	richgo test -v ./...
+
+install_deps:
+	$(info ******************** downloading dependencies ********************)
+	go get -v ./...
+
+clean:
+	rm -rf $(BIN)
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 000000000..592c0b8ab
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,112 @@
+![cobra logo](assets/CobraMain.png)
+
+Cobra is a library for creating powerful modern CLI applications.
+
+Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
+[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
+name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra.
+
+[![](https://img.shields.io/github/actions/workflow/status/spf13/cobra/test.yml?branch=main&longCache=true&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
+[![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
+[![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199)
+
+# Overview
+
+Cobra is a library providing a simple interface to create powerful modern CLI
+interfaces similar to git & go tools.
+
+Cobra provides:
+* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
+* Fully POSIX-compliant flags (including short & long versions)
+* Nested subcommands
+* Global, local and cascading flags
+* Intelligent suggestions (`app srver`... did you mean `app server`?)
+* Automatic help generation for commands and flags
+* Grouping help for subcommands
+* Automatic help flag recognition of `-h`, `--help`, etc.
+* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell)
+* Automatically generated man pages for your application
+* Command aliases so you can change things without breaking them
+* The flexibility to define your own help, usage, etc.
+* Optional seamless integration with [viper](https://github.com/spf13/viper) for 12-factor apps
+
+# Concepts
+
+Cobra is built on a structure of commands, arguments & flags.
+
+**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
+
+The best applications read like sentences when used, and as a result, users
+intuitively know how to interact with them.
+
+The pattern to follow is
+`APPNAME VERB NOUN --ADJECTIVE`
+ or
+`APPNAME COMMAND ARG --FLAG`.
+
+A few good real-world examples may better illustrate this point.
+
+In the following example, 'server' is a command, and 'port' is a flag:
+
+    hugo server --port=1313
+
+In this command we are telling Git to clone the URL bare:
+
+    git clone URL --bare
+
+## Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have children commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command)
+
+## Flags
+
+A flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
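+
+For illustration, a minimal sketch of a command with its own flag (the surrounding
+`rootCmd` and `main` wiring, and the `fmt` import, are assumed to exist elsewhere):
+
+```go
+var port int
+
+var serverCmd = &cobra.Command{
+	Use:   "server",
+	Short: "Start the server",
+	Run: func(cmd *cobra.Command, args []string) {
+		fmt.Printf("serving on port %d\n", port)
+	},
+}
+
+func init() {
+	// 'port' is local to the 'server' command, mirroring 'hugo server --port=1313'.
+	serverCmd.Flags().IntVar(&port, "port", 1313, "port to listen on")
+	rootCmd.AddCommand(serverCmd)
+}
+```
+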
+A Cobra command can define flags that persist through to children commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/spf13/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
+# Installing
+Using Cobra is easy. First, use `go get` to install the latest version
+of the library.
+
+```
+go get -u github.com/spf13/cobra@latest
+```
+
+Next, include Cobra in your application:
+
+```go
+import "github.com/spf13/cobra"
+```
+
+# Usage
+`cobra-cli` is a command line program to generate Cobra applications and command files.
+It will bootstrap your application scaffolding to rapidly
+develop a Cobra-based application. It is the easiest way to incorporate Cobra into your application.
+
+It can be installed by running:
+
+```
+go install github.com/spf13/cobra-cli@latest
+```
+
+For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md).
+
+For complete details on using the Cobra library, please read [The Cobra User Guide](user_guide.md).
+
+# License
+
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt).
diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go
new file mode 100644
index 000000000..2d0239437
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/active_help.go
@@ -0,0 +1,63 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+	"fmt"
+	"os"
+	"strings"
+)
+
+const (
+	activeHelpMarker = "_activeHelp_ "
+	// The below values should not be changed: programs will be using them explicitly
+	// in their user documentation, and users will be using them explicitly.
+	activeHelpEnvVarSuffix  = "_ACTIVE_HELP"
+	activeHelpGlobalEnvVar  = "COBRA_ACTIVE_HELP"
+	activeHelpGlobalDisable = "0"
+)
+
+// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp.
+// Such strings will be processed by the completion script and will be shown as ActiveHelp
+// to the user.
+// The array parameter should be the array that will contain the completions.
+// This function can be called multiple times before and/or after completions are added to
+// the array. Each time this function is called with the same array, the new
+// ActiveHelp line will be shown below the previous ones when completion is triggered.
+func AppendActiveHelp(compArray []string, activeHelpStr string) []string {
+	return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr))
+}
+
+// GetActiveHelpConfig returns the value of the ActiveHelp environment variable
+// <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the root command in upper
+// case, with all - replaced by _.
+// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP
+// is set to "0".
+func GetActiveHelpConfig(cmd *Command) string {
+	activeHelpCfg := os.Getenv(activeHelpGlobalEnvVar)
+	if activeHelpCfg != activeHelpGlobalDisable {
+		activeHelpCfg = os.Getenv(activeHelpEnvVar(cmd.Root().Name()))
+	}
+	return activeHelpCfg
+}
+
+// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment
+// variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the
+// root command in upper case, with all - replaced by _.
+func activeHelpEnvVar(name string) string {
+	// This format should not be changed: users will be using it explicitly.
+	activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix))
+	return strings.ReplaceAll(activeHelpEnvVar, "-", "_")
+}
diff --git a/vendor/github.com/spf13/cobra/active_help.md b/vendor/github.com/spf13/cobra/active_help.md
new file mode 100644
index 000000000..5e7f59af3
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/active_help.md
@@ -0,0 +1,157 @@
+# Active Help
+
+Active Help is a framework provided by Cobra which allows a program to define messages (hints, warnings, etc.) that will be printed during program usage. It aims to make it easier for your users to learn how to use your program. If configured by the program, Active Help is printed when the user triggers shell completion.
+
+For example,
+```
+bash-5.1$ helm repo add [tab]
+You must choose a name for the repo you are adding.
+
+bash-5.1$ bin/helm package [tab]
+Please specify the path to the chart to package
+
+bash-5.1$ bin/helm package [tab][tab]
+bin/    internal/    scripts/    pkg/    testdata/
+```
+
+**Hint**: A good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions to guide the user in knowing what is expected by the program.
+
+## Supported shells
+
+Active Help is currently only supported for the following shells:
+- Bash (using [bash completion V2](shell_completions.md#bash-completion-v2) only). Note that bash 4.4 or higher is required for the prompt to appear when an Active Help message is printed.
+- Zsh
+
+## Adding Active Help messages
+
+As Active Help uses the shell completion system, the implementation of Active Help messages is done by enhancing custom dynamic completions. If you are not familiar with dynamic completions, please refer to [Shell Completions](shell_completions.md).
+
+Adding Active Help is done through the use of the `cobra.AppendActiveHelp(...)` function, where the program repeatedly adds Active Help messages to the list of completions. Keep reading for details.
+
+### Active Help for nouns
+
+Adding Active Help when completing a noun is done within the `ValidArgsFunction(...)` of a command. 
Please notice the use of `cobra.AppendActiveHelp(...)` in the following example:
+
+```go
+cmd := &cobra.Command{
+	Use:   "add [NAME] [URL]",
+	Short: "add a chart repository",
+	Args:  require.ExactArgs(2),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return addRepo(args)
+	},
+	ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+		var comps []string
+		if len(args) == 0 {
+			comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding")
+		} else if len(args) == 1 {
+			comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding")
+		} else {
+			comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments")
+		}
+		return comps, cobra.ShellCompDirectiveNoFileComp
+	},
+}
+```
+The example above defines the completions (none, in this specific example) as well as the Active Help messages for the `helm repo add` command. It yields the following behavior:
+```
+bash-5.1$ helm repo add [tab]
+You must choose a name for the repo you are adding
+
+bash-5.1$ helm repo add grafana [tab]
+You must specify the URL for the repo you are adding
+
+bash-5.1$ helm repo add grafana https://grafana.github.io/helm-charts [tab]
+This command does not take any more arguments
+```
+**Hint**: As can be seen in the above example, a good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions.
+
+### Active Help for flags
+
+Providing Active Help for flags is done in the same fashion as for nouns, but using the completion function registered for the flag. For example:
+```go
+_ = cmd.RegisterFlagCompletionFunc("version", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	if len(args) != 2 {
+		return cobra.AppendActiveHelp(nil, "You must first specify the chart to install before the --version flag can be completed"), cobra.ShellCompDirectiveNoFileComp
+	}
+	return compVersionFlag(args[1], toComplete)
+})
+```
+The example above prints an Active Help message when not enough information was given by the user to complete the `--version` flag.
+```
+bash-5.1$ bin/helm install myrelease --version 2.0.[tab]
+You must first specify the chart to install before the --version flag can be completed
+
+bash-5.1$ bin/helm install myrelease bitnami/solr --version 2.0.[tab][tab]
+2.0.1  2.0.2  2.0.3
+```
+
+## User control of Active Help
+
+You may want to allow your users to disable Active Help or choose between different levels of Active Help. It is entirely up to the program to define the type of configurability of Active Help that it wants to offer, if any.
+Allowing users to configure Active Help is entirely optional; you can use Active Help in your program without doing anything about Active Help configuration.
+
+The way to configure Active Help is to use the program's Active Help environment
+variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where `<PROGRAM>` is the name of your
+program in uppercase with any `-` replaced by an `_` (e.g., for a program named `my-tool`, the
+variable is `MY_TOOL_ACTIVE_HELP`). The variable should be set by the user to whatever
+Active Help configuration values are supported by the program.
+
+For example, say `helm` has chosen to support three levels for Active Help: `on`, `off`, `local`. Then a user
+would set the desired behavior to `local` by doing `export HELM_ACTIVE_HELP=local` in their shell.
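+
+Under the hood, `cobra.GetActiveHelpConfig(cmd)` performs just two environment lookups, with the global `COBRA_ACTIVE_HELP=0` taking precedence over the program-specific variable. A minimal sketch of the resulting behavior (assuming a root command named `helm`; the level values are illustrative):
+
+```go
+os.Setenv("COBRA_ACTIVE_HELP", "0") // global kill switch
+_ = cobra.GetActiveHelpConfig(cmd)  // returns "0"; HELM_ACTIVE_HELP is not even consulted
+
+os.Unsetenv("COBRA_ACTIVE_HELP")
+os.Setenv("HELM_ACTIVE_HELP", "local")
+_ = cobra.GetActiveHelpConfig(cmd)  // returns "local", the program-specific value
+```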
+
+For simplicity, when in `cmd.ValidArgsFunction(...)` or a flag's completion function, the program should read the
+Active Help configuration using the `cobra.GetActiveHelpConfig(cmd)` function and select what Active Help messages
+should or should not be added (instead of reading the environment variable directly).
+
+For example:
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	activeHelpLevel := cobra.GetActiveHelpConfig(cmd)
+
+	var comps []string
+	if len(args) == 0 {
+		if activeHelpLevel != "off" {
+			comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding")
+		}
+	} else if len(args) == 1 {
+		if activeHelpLevel != "off" {
+			comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding")
+		}
+	} else {
+		if activeHelpLevel == "local" {
+			comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments")
+		}
+	}
+	return comps, cobra.ShellCompDirectiveNoFileComp
+},
+```
+**Note 1**: If the `<PROGRAM>_ACTIVE_HELP` environment variable is set to the string "0", Cobra will automatically disable all Active Help output (even if some output was specified by the program using the `cobra.AppendActiveHelp(...)` function). Using "0" can simplify your code in situations where you want to blindly disable Active Help without having to call `cobra.GetActiveHelpConfig(cmd)` explicitly.
+
+**Note 2**: If a user wants to disable Active Help for every single program based on Cobra, she can set the environment variable `COBRA_ACTIVE_HELP` to "0". In this case `cobra.GetActiveHelpConfig(cmd)` will return "0" no matter what the variable `<PROGRAM>_ACTIVE_HELP` is set to.
+
+**Note 3**: If the user does not set `<PROGRAM>_ACTIVE_HELP` or `COBRA_ACTIVE_HELP` (which will be a common case), the default value for the Active Help configuration returned by `cobra.GetActiveHelpConfig(cmd)` will be the empty string.
+
+## Active Help with Cobra's default completion command
+
+Cobra provides a default `completion` command for programs that wish to use it.
+When using the default `completion` command, Active Help is configurable in the same
+fashion as described above using environment variables. You may wish to document this in more
+detail for your users.
+
+## Debugging Active Help
+
+Debugging your Active Help code is done in the same way as debugging your dynamic completion code, which is with Cobra's hidden `__complete` command. Please refer to [debugging shell completion](shell_completions.md#debugging) for details.
+
+When debugging with the `__complete` command, if you want to specify different Active Help configurations, you should use the active help environment variable. That variable is named `<PROGRAM>_ACTIVE_HELP` where `<PROGRAM>` is the name of your program in uppercase with any `-` replaced by an `_`. 
For example, we can test deactivating some Active Help as shown below:
+```
+$ HELM_ACTIVE_HELP=1 bin/helm __complete install wordpress bitnami/h
+bitnami/haproxy
+bitnami/harbor
+_activeHelp_ WARNING: cannot re-use a name that is still in use
+:0
+Completion ended with directive: ShellCompDirectiveDefault
+
+$ HELM_ACTIVE_HELP=0 bin/helm __complete install wordpress bitnami/h
+bitnami/haproxy
+bitnami/harbor
+:0
+Completion ended with directive: ShellCompDirectiveDefault
+```
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
new file mode 100644
index 000000000..e79ec33a8
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -0,0 +1,131 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+	"fmt"
+	"strings"
+)
+
+type PositionalArgs func(cmd *Command, args []string) error
+
+// legacyArgs validation has the following behaviour:
+// - root commands with no subcommands can take arbitrary arguments
+// - root commands with subcommands will do subcommand validity checking
+// - subcommands will always accept arbitrary arguments
+func legacyArgs(cmd *Command, args []string) error {
+	// no subcommand, always take args
+	if !cmd.HasSubCommands() {
+		return nil
+	}
+
+	// root command with subcommands, do subcommand checking.
+	if !cmd.HasParent() && len(args) > 0 {
+		return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
+	}
+	return nil
+}
+
+// NoArgs returns an error if any args are included.
+func NoArgs(cmd *Command, args []string) error {
+	if len(args) > 0 {
+		return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
+	}
+	return nil
+}
+
+// OnlyValidArgs returns an error if there are any positional args that are not in
+// the `ValidArgs` field of `Command`.
+func OnlyValidArgs(cmd *Command, args []string) error {
+	if len(cmd.ValidArgs) > 0 {
+		// Remove any description that may be included in ValidArgs.
+		// A description follows a tab character.
+		var validArgs []string
+		for _, v := range cmd.ValidArgs {
+			validArgs = append(validArgs, strings.Split(v, "\t")[0])
+		}
+		for _, v := range args {
+			if !stringInSlice(v, validArgs) {
+				return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
+			}
+		}
+	}
+	return nil
+}
+
+// ArbitraryArgs never returns an error.
+func ArbitraryArgs(cmd *Command, args []string) error {
+	return nil
+}
+
+// MinimumNArgs returns an error if there are not at least N args.
+func MinimumNArgs(n int) PositionalArgs {
+	return func(cmd *Command, args []string) error {
+		if len(args) < n {
+			return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
+		}
+		return nil
+	}
+}
+
+// MaximumNArgs returns an error if there are more than N args.
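+//
+// As a usage sketch (the command shown is invented for illustration), a validator
+// is assigned to the Args field of a Command:
+//
+//	cmd := &cobra.Command{
+//		Use:  "get RESOURCE [NAME]",
+//		Args: cobra.MaximumNArgs(2), // accept at most two positional args
+//		Run:  func(cmd *cobra.Command, args []string) {},
+//	}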
+func MaximumNArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) > n { + return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args)) + } + return nil + } +} + +// ExactArgs returns an error if there are not exactly n args. +func ExactArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) != n { + return fmt.Errorf("accepts %d arg(s), received %d", n, len(args)) + } + return nil + } +} + +// RangeArgs returns an error if the number of args is not within the expected range. +func RangeArgs(min int, max int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) < min || len(args) > max { + return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args)) + } + return nil + } +} + +// MatchAll allows combining several PositionalArgs to work in concert. +func MatchAll(pargs ...PositionalArgs) PositionalArgs { + return func(cmd *Command, args []string) error { + for _, parg := range pargs { + if err := parg(cmd, args); err != nil { + return err + } + } + return nil + } +} + +// ExactValidArgs returns an error if there are not exactly N positional args OR +// there are any positional args that are not in the `ValidArgs` field of `Command` +// +// Deprecated: use MatchAll(ExactArgs(n), OnlyValidArgs) instead +func ExactValidArgs(n int) PositionalArgs { + return MatchAll(ExactArgs(n), OnlyValidArgs) +} diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go new file mode 100644 index 000000000..10c78847d --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -0,0 +1,712 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/spf13/pflag" +) + +// Annotations for Bash completion. +const ( + BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions" + BashCompCustom = "cobra_annotation_bash_completion_custom" + BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" + BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" +) + +func writePreamble(buf io.StringWriter, name string) { + WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(` +__%[1]s_debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Homebrew on Macs have version 1.3 of bash-completion which doesn't include +# _init_completion. This is a very minimal version of that function. 
+__%[1]s_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +__%[1]s_index_of_word() +{ + local w word=$1 + shift + index=0 + for w in "$@"; do + [[ $w = "$word" ]] && return + index=$((index+1)) + done + index=-1 +} + +__%[1]s_contains_word() +{ + local w word=$1; shift + for w in "$@"; do + [[ $w = "$word" ]] && return + done + return 1 +} + +__%[1]s_handle_go_custom_completion() +{ + __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}" + + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d + local shellCompDirectiveFilterFileExt=%[6]d + local shellCompDirectiveFilterDirs=%[7]d + + local out requestComp lastParam lastChar comp directive args + + # Prepare the command to request completions for the program. + # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + args=("${words[@]:1}") + # Disable ActiveHelp which is not supported for bash completion v1 + requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}" + + lastParam=${words[$((${#words[@]}-1))]} + lastChar=${lastParam:$((${#lastParam}-1)):1} + __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}" + + if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. + __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter" + requestComp="${requestComp} \"\"" + fi + + __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}" + # Use eval to handle any environment variables and such + out=$(eval "${requestComp}" 2>/dev/null) + + # Extract the directive integer at the very end of the output following a colon (:) + directive=${out##*:} + # Remove the directive + out=${out%%:*} + if [ "${directive}" = "${out}" ]; then + # There is not directive specified + directive=0 + fi + __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}" + __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out}" + + if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + # Error code. No completion. + __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code" + return + else + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "${FUNCNAME[0]}: activating no space" + compopt -o nospace + fi + fi + if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "${FUNCNAME[0]}: activating no file completion" + compopt +o default + fi + fi + fi + + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + # File extension filtering + local fullFilter filter filteringCmd + # Do not use quotes around the $out variable or else newline + # characters will be kept. + for filter in ${out}; do + fullFilter+="$filter|" + done + + filteringCmd="_filedir $fullFilter" + __%[1]s_debug "File filtering command: $filteringCmd" + $filteringCmd + elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + # File completion for directories only + local subdir + # Use printf to strip any trailing newline + subdir=$(printf "%%s" "${out}") + if [ -n "$subdir" ]; then + __%[1]s_debug "Listing directories in $subdir" + __%[1]s_handle_subdirs_in_dir_flag "$subdir" + else + __%[1]s_debug "Listing directories in ." 
+ _filedir -d + fi + else + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${out}" -- "$cur") + fi +} + +__%[1]s_handle_reply() +{ + __%[1]s_debug "${FUNCNAME[0]}" + local comp + case $cur in + -*) + if [[ $(type -t compopt) = "builtin" ]]; then + compopt -o nospace + fi + local allflags + if [ ${#must_have_one_flag[@]} -ne 0 ]; then + allflags=("${must_have_one_flag[@]}") + else + allflags=("${flags[*]} ${two_word_flags[*]}") + fi + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${allflags[*]}" -- "$cur") + if [[ $(type -t compopt) = "builtin" ]]; then + [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace + fi + + # complete after --flag=abc + if [[ $cur == *=* ]]; then + if [[ $(type -t compopt) = "builtin" ]]; then + compopt +o nospace + fi + + local index flag + flag="${cur%%=*}" + __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}" + COMPREPLY=() + if [[ ${index} -ge 0 ]]; then + PREFIX="" + cur="${cur#*=}" + ${flags_completion[${index}]} + if [ -n "${ZSH_VERSION:-}" ]; then + # zsh completion needs --flag= prefix + eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" + fi + fi + fi + + if [[ -z "${flag_parsing_disabled}" ]]; then + # If flag parsing is enabled, we have completed the flags and can return. + # If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough + # to possibly call handle_go_custom_completion. + return 0; + fi + ;; + esac + + # check if we are handling a flag with special work handling + local index + __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + ${flags_completion[${index}]} + return + fi + + # we are parsing a flag and don't have a special handler, no completion + if [[ ${cur} != "${words[cword]}" ]]; then + return + fi + + local completions + completions=("${commands[@]}") + if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then + completions+=("${must_have_one_noun[@]}") + elif [[ -n "${has_completion_function}" ]]; then + # if a go completion function is provided, defer to that function + __%[1]s_handle_go_custom_completion + fi + if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then + completions+=("${must_have_one_flag[@]}") + fi + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${completions[*]}" -- "$cur") + + if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${noun_aliases[*]}" -- "$cur") + fi + + if [[ ${#COMPREPLY[@]} -eq 0 ]]; then + if declare -F __%[1]s_custom_func >/dev/null; then + # try command name qualified custom func + __%[1]s_custom_func + else + # otherwise fall back to unqualified for compatibility + declare -F __custom_func >/dev/null && __custom_func + fi + fi + + # available in bash-completion >= 2, not always present on macOS + if declare -F __ltrim_colon_completions >/dev/null; then + __ltrim_colon_completions "$cur" + fi + + # If there is only 1 completion and it is a flag with an = it will be completed + # but we don't want a space after the = + if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then + compopt -o nospace + fi +} + +# The arguments should be in the form "ext1|ext2|extn" +__%[1]s_handle_filename_extension_flag() +{ + local ext="$1" + _filedir "@(${ext})" +} + +__%[1]s_handle_subdirs_in_dir_flag() +{ + local dir="$1" + pushd "${dir}" >/dev/null 2>&1 && _filedir -d && 
popd >/dev/null 2>&1 || return +} + +__%[1]s_handle_flag() +{ + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + # if a command required a flag, and we found it, unset must_have_one_flag() + local flagname=${words[c]} + local flagvalue="" + # if the word contained an = + if [[ ${words[c]} == *"="* ]]; then + flagvalue=${flagname#*=} # take in as flagvalue after the = + flagname=${flagname%%=*} # strip everything after the = + flagname="${flagname}=" # but put the = back + fi + __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}" + if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then + must_have_one_flag=() + fi + + # if you set a flag which only applies to this command, don't show subcommands + if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then + commands=() + fi + + # keep flag value with flagname as flaghash + # flaghash variable is an associative array which is only supported in bash > 3. + if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then + if [ -n "${flagvalue}" ] ; then + flaghash[${flagname}]=${flagvalue} + elif [ -n "${words[ $((c+1)) ]}" ] ; then + flaghash[${flagname}]=${words[ $((c+1)) ]} + else + flaghash[${flagname}]="true" # pad "true" for bool flag + fi + fi + + # skip the argument to a two word flag + if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then + __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" + c=$((c+1)) + # if we are looking for a flags value, don't show commands + if [[ $c -eq $cword ]]; then + commands=() + fi + fi + + c=$((c+1)) + +} + +__%[1]s_handle_noun() +{ + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then + must_have_one_noun=() + elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then + must_have_one_noun=() + fi + + nouns+=("${words[c]}") + c=$((c+1)) +} + +__%[1]s_handle_command() +{ + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + local next_command + if [[ -n ${last_command} ]]; then + next_command="_${last_command}_${words[c]//:/__}" + else + if [[ $c -eq 0 ]]; then + next_command="_%[1]s_root_command" + else + next_command="_${words[c]//:/__}" + fi + fi + c=$((c+1)) + __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}" + declare -F "$next_command" >/dev/null && $next_command +} + +__%[1]s_handle_word() +{ + if [[ $c -ge $cword ]]; then + __%[1]s_handle_reply + return + fi + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + if [[ "${words[c]}" == -* ]]; then + __%[1]s_handle_flag + elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then + __%[1]s_handle_command + elif [[ $c -eq 0 ]]; then + __%[1]s_handle_command + elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then + # aliashash variable is an associative array which is only supported in bash > 3. 
+ if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then + words[c]=${aliashash[${words[c]}]} + __%[1]s_handle_command + else + __%[1]s_handle_noun + fi + else + __%[1]s_handle_noun + fi + __%[1]s_handle_word +} + +`, name, ShellCompNoDescRequestCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) +} + +func writePostscript(buf io.StringWriter, name string) { + name = strings.ReplaceAll(name, ":", "__") + WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(`{ + local cur prev words cword split + declare -A flaghash 2>/dev/null || : + declare -A aliashash 2>/dev/null || : + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -s || return + else + __%[1]s_init_completion -n "=" || return + fi + + local c=0 + local flag_parsing_disabled= + local flags=() + local two_word_flags=() + local local_nonpersistent_flags=() + local flags_with_completion=() + local flags_completion=() + local commands=("%[1]s") + local command_aliases=() + local must_have_one_flag=() + local must_have_one_noun=() + local has_completion_function="" + local last_command="" + local nouns=() + local noun_aliases=() + + __%[1]s_handle_word +} + +`, name)) + WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_%s %s +else + complete -o default -o nospace -F __start_%s %s +fi + +`, name, name, name, name)) + WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n") +} + +func writeCommands(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " commands=()\n") + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() && c != cmd.helpCommand { + continue + } + WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name())) + writeCmdAliases(buf, c) + } + WriteStringAndCheck(buf, "\n") +} + +func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) { + for key, value := range annotations { + switch key { + case BashCompFilenameExt: + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + + var ext string + if len(value) > 0 { + ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|") + } else { + ext = "_filedir" + } + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + case BashCompCustom: + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + + if len(value) > 0 { + handlers := strings.Join(value, "; ") + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) + } else { + WriteStringAndCheck(buf, " flags_completion+=(:)\n") + } + case BashCompSubdirsInDir: + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + + var ext string + if len(value) == 1 { + ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0] + } else { + ext = "_filedir -d" + } + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + } + } +} + +const cbn = "\")\n" + +func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { + name := flag.Shorthand + format := " " + if len(flag.NoOptDefVal) == 0 { + format += "two_word_" + } + format += "flags+=(\"-%s" + cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) + writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) +} + +func 
writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { + name := flag.Name + format := " flags+=(\"--%s" + if len(flag.NoOptDefVal) == 0 { + format += "=" + } + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) + if len(flag.NoOptDefVal) == 0 { + format = " two_word_flags+=(\"--%s" + cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) + } + writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) +} + +func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { + name := flag.Name + format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn + if len(flag.NoOptDefVal) == 0 { + format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn + } + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand)) + } +} + +// prepareCustomAnnotationsForFlags setup annotations for go completions for registered flags +func prepareCustomAnnotationsForFlags(cmd *Command) { + flagCompletionMutex.RLock() + defer flagCompletionMutex.RUnlock() + for flag := range flagCompletionFunctions { + // Make sure the completion script calls the __*_go_custom_completion function for + // every registered flag. We need to do this here (and not when the flag was registered + // for completion) so that we can know the root command name for the prefix + // of ___go_custom_completion + if flag.Annotations == nil { + flag.Annotations = map[string][]string{} + } + flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())} + } +} + +func writeFlags(buf io.StringWriter, cmd *Command) { + prepareCustomAnnotationsForFlags(cmd) + WriteStringAndCheck(buf, ` flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + +`) + + if cmd.DisableFlagParsing { + WriteStringAndCheck(buf, " flag_parsing_disabled=1\n") + } + + localNonPersistentFlags := cmd.LocalNonPersistentFlags() + cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + writeFlag(buf, flag, cmd) + if len(flag.Shorthand) > 0 { + writeShortFlag(buf, flag, cmd) + } + // localNonPersistentFlags are used to stop the completion of subcommands when one is set + // if TraverseChildren is true we should allow to complete subcommands + if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren { + writeLocalNonPersistentFlag(buf, flag) + } + }) + cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + writeFlag(buf, flag, cmd) + if len(flag.Shorthand) > 0 { + writeShortFlag(buf, flag, cmd) + } + }) + + WriteStringAndCheck(buf, "\n") +} + +func writeRequiredFlag(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " must_have_one_flag=()\n") + flags := cmd.NonInheritedFlags() + flags.VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + for key := range flag.Annotations { + switch key { + case BashCompOneRequiredFlag: + format := " must_have_one_flag+=(\"--%s" + if flag.Value.Type() != "bool" { + format += "=" + } + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) + + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) + } + } + } + }) +} + +func writeRequiredNouns(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " must_have_one_noun=()\n") + 
sort.Strings(cmd.ValidArgs) + for _, value := range cmd.ValidArgs { + // Remove any description that may be included following a tab character. + // Descriptions are not supported by bash completion. + value = strings.Split(value, "\t")[0] + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) + } + if cmd.ValidArgsFunction != nil { + WriteStringAndCheck(buf, " has_completion_function=1\n") + } +} + +func writeCmdAliases(buf io.StringWriter, cmd *Command) { + if len(cmd.Aliases) == 0 { + return + } + + sort.Strings(cmd.Aliases) + + WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n")) + for _, value := range cmd.Aliases { + WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value)) + WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) + } + WriteStringAndCheck(buf, ` fi`) + WriteStringAndCheck(buf, "\n") +} +func writeArgAliases(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " noun_aliases=()\n") + sort.Strings(cmd.ArgAliases) + for _, value := range cmd.ArgAliases { + WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value)) + } +} + +func gen(buf io.StringWriter, cmd *Command) { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() && c != cmd.helpCommand { + continue + } + gen(buf, c) + } + commandName := cmd.CommandPath() + commandName = strings.ReplaceAll(commandName, " ", "_") + commandName = strings.ReplaceAll(commandName, ":", "__") + + if cmd.Root() == cmd { + WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName)) + } else { + WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName)) + } + + WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName)) + WriteStringAndCheck(buf, "\n") + WriteStringAndCheck(buf, " command_aliases=()\n") + WriteStringAndCheck(buf, "\n") + + writeCommands(buf, cmd) + writeFlags(buf, cmd) + writeRequiredFlag(buf, cmd) + writeRequiredNouns(buf, cmd) + writeArgAliases(buf, cmd) + WriteStringAndCheck(buf, "}\n\n") +} + +// GenBashCompletion generates bash completion file and writes to the passed writer. +func (c *Command) GenBashCompletion(w io.Writer) error { + buf := new(bytes.Buffer) + writePreamble(buf, c.Name()) + if len(c.BashCompletionFunction) > 0 { + buf.WriteString(c.BashCompletionFunction + "\n") + } + gen(buf, c) + writePostscript(buf, c.Name()) + + _, err := buf.WriteTo(w) + return err +} + +func nonCompletableFlag(flag *pflag.Flag) bool { + return flag.Hidden || len(flag.Deprecated) > 0 +} + +// GenBashCompletionFile generates bash completion file. +func (c *Command) GenBashCompletionFile(filename string) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenBashCompletion(outFile) +} diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md new file mode 100644 index 000000000..52919b2fa --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completions.md @@ -0,0 +1,93 @@ +# Generating Bash Completions For Your cobra.Command + +Please refer to [Shell Completions](shell_completions.md) for details. + +## Bash legacy dynamic completions + +For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. 
This legacy solution can be used alongside `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution.
+
+**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own.
+
+The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions.
+
+Some code that works in Kubernetes:
+
+```go
+const (
+	bash_completion_func = `__kubectl_parse_get()
+{
+    local kubectl_output out
+    if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
+        out=($(echo "${kubectl_output}" | awk '{print $1}'))
+        COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
+    fi
+}
+
+__kubectl_get_resource()
+{
+    if [[ ${#nouns[@]} -eq 0 ]]; then
+        return 1
+    fi
+    __kubectl_parse_get ${nouns[${#nouns[@]} -1]}
+    if [[ $? -eq 0 ]]; then
+        return 0
+    fi
+}
+
+__kubectl_custom_func() {
+    case ${last_command} in
+        kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
+            __kubectl_get_resource
+            return
+            ;;
+        *)
+            ;;
+    esac
+}
+`)
+```
+
+And then I set that in my command definition:
+
+```go
+cmds := &cobra.Command{
+	Use:   "kubectl",
+	Short: "kubectl controls the Kubernetes cluster manager",
+	Long: `kubectl controls the Kubernetes cluster manager.
+
+Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
+	Run: runHelp,
+	BashCompletionFunction: bash_completion_func,
+}
+```
+
+The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`__<command-use>_custom_func()`) to be called when the built-in processor was unable to find a solution. In the case of Kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to Kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
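+
+For comparison, the same completion expressed through the modern `ValidArgsFunction` mechanism might look like the sketch below. Here `completePods` is a hypothetical helper (not part of Cobra or kubectl) that would shell out to `kubectl get` and collect pod names:
+
+```go
+cmds.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	// completePods (hypothetical) returns the pod names that start with toComplete.
+	return completePods(toComplete), cobra.ShellCompDirectiveNoFileComp
+}
+```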
+
+Similarly, for flags:
+
+```go
+	annotation := make(map[string][]string)
+	annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
+
+	flag := &pflag.Flag{
+		Name:        "namespace",
+		Usage:       usage,
+		Annotations: annotation,
+	}
+	cmd.Flags().AddFlag(flag)
+```
+
+In addition, add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
+value, e.g.:
+
+```bash
+__kubectl_get_namespaces()
+{
+    local template
+    template="{{ range .items }}{{ .metadata.name }} {{ end }}"
+    local kubectl_out
+    if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
+        COMPREPLY=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) )
+    fi
+}
+```
diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go
new file mode 100644
index 000000000..19b09560c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go
@@ -0,0 +1,396 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error {
+	buf := new(bytes.Buffer)
+	genBashComp(buf, c.Name(), includeDesc)
+	_, err := buf.WriteTo(w)
+	return err
+}
+
+func genBashComp(buf io.StringWriter, name string, includeDesc bool) {
+	compCmd := ShellCompRequestCmd
+	if !includeDesc {
+		compCmd = ShellCompNoDescRequestCmd
+	}
+
+	WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*-
+
+__%[1]s_debug()
+{
+    if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then
+        echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+    fi
+}
+
+# Macs have bash3 for which the bash-completion package doesn't include
+# _init_completion. This is a minimal version of that function.
+__%[1]s_init_completion()
+{
+    COMPREPLY=()
+    _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+# This function calls the %[1]s program to obtain the completion
+# results and the directive. It fills the 'out' and 'directive' vars.
+__%[1]s_get_completion_results() {
+    local requestComp lastParam lastChar args
+
+    # Prepare the command to request completions for the program.
+    # Calling ${words[0]} instead of directly %[1]s allows handling aliases
+    args=("${words[@]:1}")
+    requestComp="${words[0]} %[2]s ${args[*]}"
+
+    lastParam=${words[$((${#words[@]}-1))]}
+    lastChar=${lastParam:$((${#lastParam}-1)):1}
+    __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}"
+
+    if [[ -z ${cur} && ${lastChar} != = ]]; then
+        # If the last parameter is complete (there is a space following it),
+        # we add an extra empty parameter so we can indicate this to the Go method.
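+        # (The Go side then sees an empty "toComplete" argument and completes
+        # a new word rather than extending the previous one.)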
+        __%[1]s_debug "Adding extra empty parameter"
+        requestComp="${requestComp} ''"
+    fi
+
+    # When completing a flag with an = (e.g., %[1]s -n=)
+    # bash focuses on the part after the =, so we need to remove
+    # the flag part from $cur
+    if [[ ${cur} == -*=* ]]; then
+        cur="${cur#*=}"
+    fi
+
+    __%[1]s_debug "Calling ${requestComp}"
+    # Use eval to handle any environment variables and such
+    out=$(eval "${requestComp}" 2>/dev/null)
+
+    # Extract the directive integer at the very end of the output following a colon (:)
+    directive=${out##*:}
+    # Remove the directive
+    out=${out%%:*}
+    if [[ ${directive} == "${out}" ]]; then
+        # There is no directive specified
+        directive=0
+    fi
+    __%[1]s_debug "The completion directive is: ${directive}"
+    __%[1]s_debug "The completions are: ${out}"
+}
+
+__%[1]s_process_completion_results() {
+    local shellCompDirectiveError=%[3]d
+    local shellCompDirectiveNoSpace=%[4]d
+    local shellCompDirectiveNoFileComp=%[5]d
+    local shellCompDirectiveFilterFileExt=%[6]d
+    local shellCompDirectiveFilterDirs=%[7]d
+    local shellCompDirectiveKeepOrder=%[8]d
+
+    if (((directive & shellCompDirectiveError) != 0)); then
+        # Error code. No completion.
+        __%[1]s_debug "Received error from custom completion go code"
+        return
+    else
+        if (((directive & shellCompDirectiveNoSpace) != 0)); then
+            if [[ $(type -t compopt) == builtin ]]; then
+                __%[1]s_debug "Activating no space"
+                compopt -o nospace
+            else
+                __%[1]s_debug "No space directive not supported in this version of bash"
+            fi
+        fi
+        if (((directive & shellCompDirectiveKeepOrder) != 0)); then
+            if [[ $(type -t compopt) == builtin ]]; then
+                # nosort is not supported for bash versions below 4.4
+                if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then
+                    __%[1]s_debug "No sort directive not supported in this version of bash"
+                else
+                    __%[1]s_debug "Activating keep order"
+                    compopt -o nosort
+                fi
+            else
+                __%[1]s_debug "No sort directive not supported in this version of bash"
+            fi
+        fi
+        if (((directive & shellCompDirectiveNoFileComp) != 0)); then
+            if [[ $(type -t compopt) == builtin ]]; then
+                __%[1]s_debug "Activating no file completion"
+                compopt +o default
+            else
+                __%[1]s_debug "No file completion directive not supported in this version of bash"
+            fi
+        fi
+    fi
+
+    # Separate activeHelp from normal completions
+    local completions=()
+    local activeHelp=()
+    __%[1]s_extract_activeHelp
+
+    if (((directive & shellCompDirectiveFilterFileExt) != 0)); then
+        # File extension filtering
+        local fullFilter filter filteringCmd
+
+        # Do not use quotes around the $completions variable or else newline
+        # characters will be kept.
+        for filter in ${completions[*]}; do
+            fullFilter+="$filter|"
+        done
+
+        filteringCmd="_filedir $fullFilter"
+        __%[1]s_debug "File filtering command: $filteringCmd"
+        $filteringCmd
+    elif (((directive & shellCompDirectiveFilterDirs) != 0)); then
+        # File completion for directories only
+
+        local subdir
+        subdir=${completions[0]}
+        if [[ -n $subdir ]]; then
+            __%[1]s_debug "Listing directories in $subdir"
+            pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
+        else
+            __%[1]s_debug "Listing directories in ."
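+            # No subdirectory was returned: complete directories relative to
+            # the current working directory instead.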
+ _filedir -d + fi + else + __%[1]s_handle_completion_types + fi + + __%[1]s_handle_special_char "$cur" : + __%[1]s_handle_special_char "$cur" = + + # Print the activeHelp statements before we finish + if ((${#activeHelp[*]} != 0)); then + printf "\n"; + printf "%%s\n" "${activeHelp[@]}" + printf "\n" + + # The prompt format is only available from bash 4.4. + # We test if it is available before using it. + if (x=${PS1@P}) 2> /dev/null; then + printf "%%s" "${PS1@P}${COMP_LINE[@]}" + else + # Can't print the prompt. Just print the + # text the user had typed, it is workable enough. + printf "%%s" "${COMP_LINE[@]}" + fi + fi +} + +# Separate activeHelp lines from real completions. +# Fills the $activeHelp and $completions arrays. +__%[1]s_extract_activeHelp() { + local activeHelpMarker="%[9]s" + local endIndex=${#activeHelpMarker} + + while IFS='' read -r comp; do + if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then + comp=${comp:endIndex} + __%[1]s_debug "ActiveHelp found: $comp" + if [[ -n $comp ]]; then + activeHelp+=("$comp") + fi + else + # Not an activeHelp line but a normal completion + completions+=("$comp") + fi + done <<<"${out}" +} + +__%[1]s_handle_completion_types() { + __%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE" + + case $COMP_TYPE in + 37|42) + # Type: menu-complete/menu-complete-backward and insert-completions + # If the user requested inserting one completion at a time, or all + # completions at once on the command-line we must remove the descriptions. + # https://github.com/spf13/cobra/issues/1508 + local tab=$'\t' comp + while IFS='' read -r comp; do + [[ -z $comp ]] && continue + # Strip any description + comp=${comp%%%%$tab*} + # Only consider the completions that match + if [[ $comp == "$cur"* ]]; then + COMPREPLY+=("$comp") + fi + done < <(printf "%%s\n" "${completions[@]}") + ;; + + *) + # Type: complete (normal completion) + __%[1]s_handle_standard_completion_case + ;; + esac +} + +__%[1]s_handle_standard_completion_case() { + local tab=$'\t' comp + + # Short circuit to optimize if we don't have descriptions + if [[ "${completions[*]}" != *$tab* ]]; then + IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur") + return 0 + fi + + local longest=0 + local compline + # Look for the longest completion so that we can format things nicely + while IFS='' read -r compline; do + [[ -z $compline ]] && continue + # Strip any description before checking the length + comp=${compline%%%%$tab*} + # Only consider the completions that match + [[ $comp == "$cur"* ]] || continue + COMPREPLY+=("$compline") + if ((${#comp}>longest)); then + longest=${#comp} + fi + done < <(printf "%%s\n" "${completions[@]}") + + # If there is a single completion left, remove the description text + if ((${#COMPREPLY[*]} == 1)); then + __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" + comp="${COMPREPLY[0]%%%%$tab*}" + __%[1]s_debug "Removed description from single completion, which is now: ${comp}" + COMPREPLY[0]=$comp + else # Format the descriptions + __%[1]s_format_comp_descriptions $longest + fi +} + +__%[1]s_handle_special_char() +{ + local comp="$1" + local char=$2 + if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then + local word=${comp%%"${comp##*${char}}"} + local idx=${#COMPREPLY[*]} + while ((--idx >= 0)); do + COMPREPLY[idx]=${COMPREPLY[idx]#"$word"} + done + fi +} + +__%[1]s_format_comp_descriptions() +{ + local tab=$'\t' + local comp desc maxdesclength + local longest=$1 + + local i ci + for ci in ${!COMPREPLY[*]}; do 
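+        # ${!COMPREPLY[*]} expands to the array indices, so ci visits every
+        # completion entry in turn.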
+ comp=${COMPREPLY[ci]} + # Properly format the description string which follows a tab character if there is one + if [[ "$comp" == *$tab* ]]; then + __%[1]s_debug "Original comp: $comp" + desc=${comp#*$tab} + comp=${comp%%%%$tab*} + + # $COLUMNS stores the current shell width. + # Remove an extra 4 because we add 2 spaces and 2 parentheses. + maxdesclength=$(( COLUMNS - longest - 4 )) + + # Make sure we can fit a description of at least 8 characters + # if we are to align the descriptions. + if ((maxdesclength > 8)); then + # Add the proper number of spaces to align the descriptions + for ((i = ${#comp} ; i < longest ; i++)); do + comp+=" " + done + else + # Don't pad the descriptions so we can fit more text after the completion + maxdesclength=$(( COLUMNS - ${#comp} - 4 )) + fi + + # If there is enough space for any description text, + # truncate the descriptions that are too long for the shell width + if ((maxdesclength > 0)); then + if ((${#desc} > maxdesclength)); then + desc=${desc:0:$(( maxdesclength - 1 ))} + desc+="…" + fi + comp+=" ($desc)" + fi + COMPREPLY[ci]=$comp + __%[1]s_debug "Final comp: $comp" + fi + done +} + +__start_%[1]s() +{ + local cur prev words cword split + + COMPREPLY=() + + # Call _init_completion from the bash-completion package + # to prepare the arguments properly + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -n =: || return + else + __%[1]s_init_completion -n =: || return + fi + + __%[1]s_debug + __%[1]s_debug "========= starting completion logic ==========" + __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $cword location, so we need + # to truncate the command-line ($words) up to the $cword location. + words=("${words[@]:0:$cword+1}") + __%[1]s_debug "Truncated words[*]: ${words[*]}," + + local out directive + __%[1]s_get_completion_results + __%[1]s_process_completion_results +} + +if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_%[1]s %[1]s +else + complete -o default -o nospace -F __start_%[1]s %[1]s +fi + +# ex: ts=4 sw=4 et filetype=sh +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, + activeHelpMarker)) +} + +// GenBashCompletionFileV2 generates Bash completion version 2. +func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenBashCompletionV2(outFile, includeDesc) +} + +// GenBashCompletionV2 generates Bash completion file version 2 +// and writes it to the passed writer. +func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error { + return c.genBashCompletion(w, includeDesc) +} diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go new file mode 100644 index 000000000..b07b44a0c --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -0,0 +1,239 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Commands similar to git, go tools and other modern CLI tools
+// inspired by go, go-Commander, gh and subcommand
+
+package cobra
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/template"
+	"time"
+	"unicode"
+)
+
+var templateFuncs = template.FuncMap{
+	"trim":                    strings.TrimSpace,
+	"trimRightSpace":          trimRightSpace,
+	"trimTrailingWhitespaces": trimRightSpace,
+	"appendIfNotPresent":      appendIfNotPresent,
+	"rpad":                    rpad,
+	"gt":                      Gt,
+	"eq":                      Eq,
+}
+
+var initializers []func()
+var finalizers []func()
+
+const (
+	defaultPrefixMatching  = false
+	defaultCommandSorting  = true
+	defaultCaseInsensitive = false
+)
+
+// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing
+// to automatically enable in CLI tools.
+// Set this to true to enable it.
+var EnablePrefixMatching = defaultPrefixMatching
+
+// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
+// To disable sorting, set it to false.
+var EnableCommandSorting = defaultCommandSorting
+
+// EnableCaseInsensitive allows case-insensitive command names. (case sensitive by default)
+var EnableCaseInsensitive = defaultCaseInsensitive
+
+// MousetrapHelpText enables an information splash screen on Windows
+// if the CLI is started from explorer.exe.
+// To disable the mousetrap, just set this variable to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapHelpText = `This is a command line tool.
+
+You need to open cmd.exe and run it from there.
+`
+
+// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows
+// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
+// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapDisplayDuration = 5 * time.Second
+
+// AddTemplateFunc adds a template function that's available to Usage and Help
+// template generation.
+func AddTemplateFunc(name string, tmplFunc interface{}) {
+	templateFuncs[name] = tmplFunc
+}
+
+// AddTemplateFuncs adds multiple template functions that are available to Usage and
+// Help template generation.
+func AddTemplateFuncs(tmplFuncs template.FuncMap) {
+	for k, v := range tmplFuncs {
+		templateFuncs[k] = v
+	}
+}
+
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
+func OnInitialize(y ...func()) {
+	initializers = append(initializers, y...)
+}
+
+// OnFinalize sets the passed functions to be run when each command's
+// Execute method terminates.
+func OnFinalize(y ...func()) {
+	finalizers = append(finalizers, y...)
+}
+
+// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
+// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
+// ints and then compared.
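+//
+// For example, Gt(2, 1) is true, and Gt([]int{1, 2, 3}, 2) is also true,
+// since the slice is compared by its length (3 > 2).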
+func Gt(a interface{}, b interface{}) bool { + var left, right int64 + av := reflect.ValueOf(a) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + left = int64(av.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + left = av.Int() + case reflect.String: + left, _ = strconv.ParseInt(av.String(), 10, 64) + } + + bv := reflect.ValueOf(b) + + switch bv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + right = int64(bv.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + right = bv.Int() + case reflect.String: + right, _ = strconv.ParseInt(bv.String(), 10, 64) + } + + return left > right +} + +// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. + +// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. +func Eq(a interface{}, b interface{}) bool { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + panic("Eq called on unsupported type") + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() == bv.Int() + case reflect.String: + return av.String() == bv.String() + } + return false +} + +func trimRightSpace(s string) string { + return strings.TrimRightFunc(s, unicode.IsSpace) +} + +// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. + +// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s. +func appendIfNotPresent(s, stringToAppend string) string { + if strings.Contains(s, stringToAppend) { + return s + } + return s + " " + stringToAppend +} + +// rpad adds padding to the right of a string. +func rpad(s string, padding int) string { + formattedString := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(formattedString, s) +} + +// tmpl executes the given template text on data, writing the result to w. +func tmpl(w io.Writer, text string, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) +} + +// ld compares two strings and returns the levenshtein distance between them. +func ld(s, t string, ignoreCase bool) int { + if ignoreCase { + s = strings.ToLower(s) + t = strings.ToLower(t) + } + d := make([][]int, len(s)+1) + for i := range d { + d[i] = make([]int, len(t)+1) + } + for i := range d { + d[i][0] = i + } + for j := range d[0] { + d[0][j] = j + } + for j := 1; j <= len(t); j++ { + for i := 1; i <= len(s); i++ { + if s[i-1] == t[j-1] { + d[i][j] = d[i-1][j-1] + } else { + min := d[i-1][j] + if d[i][j-1] < min { + min = d[i][j-1] + } + if d[i-1][j-1] < min { + min = d[i-1][j-1] + } + d[i][j] = min + 1 + } + } + + } + return d[len(s)][len(t)] +} + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} + +// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing. +func CheckErr(msg interface{}) { + if msg != nil { + fmt.Fprintln(os.Stderr, "Error:", msg) + os.Exit(1) + } +} + +// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil. 
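+// On failure it delegates to CheckErr, which prints the error and exits with code 1.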
+func WriteStringAndCheck(b io.StringWriter, s string) { + _, err := b.WriteString(s) + CheckErr(err) +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go new file mode 100644 index 000000000..01f7c6f1c --- /dev/null +++ b/vendor/github.com/spf13/cobra/command.go @@ -0,0 +1,1834 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. +// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. +package cobra + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + + flag "github.com/spf13/pflag" +) + +const FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" + +// FParseErrWhitelist configures Flag parse errors to be ignored +type FParseErrWhitelist flag.ParseErrorsWhitelist + +// Group Structure to manage groups for commands +type Group struct { + ID string + Title string +} + +// Command is just that, a command for your application. +// E.g. 'go run ...' - 'run' is the command. Cobra requires +// you to define the usage and description as part of your command +// definition to ensure usability. +type Command struct { + // Use is the one-line usage message. + // Recommended syntax is as follows: + // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. + // ... indicates that you can specify multiple values for the previous argument. + // | indicates mutually exclusive information. You can use the argument to the left of the separator or the + // argument to the right of the separator. You cannot use both arguments in a single use of the command. + // { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are + // optional, they are enclosed in brackets ([ ]). + // Example: add [-F file | -D dir]... [-f format] profile + Use string + + // Aliases is an array of aliases that can be used instead of the first word in Use. + Aliases []string + + // SuggestFor is an array of command names for which this command will be suggested - + // similar to aliases but only suggests. + SuggestFor []string + + // Short is the short description shown in the 'help' output. + Short string + + // The group id under which this subcommand is grouped in the 'help' output of its parent. + GroupID string + + // Long is the long message shown in the 'help ' output. + Long string + + // Example is examples of how to use the command. + Example string + + // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions + ValidArgs []string + // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. + // It is a dynamic version of using ValidArgs. + // Only one of ValidArgs and ValidArgsFunction can be used for a command. 
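+	// Each returned completion may carry a description after a tab character,
+	// which supporting shells display alongside the value.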
+ ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + + // Expected arguments + Args PositionalArgs + + // ArgAliases is List of aliases for ValidArgs. + // These are not suggested to the user in the shell completion, + // but accepted if entered manually. + ArgAliases []string + + // BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator. + // For portability with other shells, it is recommended to instead use ValidArgsFunction + BashCompletionFunction string + + // Deprecated defines, if this command is deprecated and should print this string when used. + Deprecated string + + // Annotations are key/value pairs that can be used by applications to identify or + // group commands. + Annotations map[string]string + + // Version defines the version for this command. If this value is non-empty and the command does not + // define a "version" flag, a "version" boolean flag will be added to the command and, if specified, + // will print content of the "Version" variable. A shorthand "v" flag will also be added if the + // command does not define one. + Version string + + // The *Run functions are executed in the following order: + // * PersistentPreRun() + // * PreRun() + // * Run() + // * PostRun() + // * PersistentPostRun() + // All functions get the same args, the arguments after the command name. + // + // PersistentPreRun: children of this command will inherit and execute. + PersistentPreRun func(cmd *Command, args []string) + // PersistentPreRunE: PersistentPreRun but returns an error. + PersistentPreRunE func(cmd *Command, args []string) error + // PreRun: children of this command will not inherit. + PreRun func(cmd *Command, args []string) + // PreRunE: PreRun but returns an error. + PreRunE func(cmd *Command, args []string) error + // Run: Typically the actual work function. Most commands will only implement this. + Run func(cmd *Command, args []string) + // RunE: Run but returns an error. + RunE func(cmd *Command, args []string) error + // PostRun: run after the Run command. + PostRun func(cmd *Command, args []string) + // PostRunE: PostRun but returns an error. + PostRunE func(cmd *Command, args []string) error + // PersistentPostRun: children of this command will inherit and execute after PostRun. + PersistentPostRun func(cmd *Command, args []string) + // PersistentPostRunE: PersistentPostRun but returns an error. + PersistentPostRunE func(cmd *Command, args []string) error + + // groups for subcommands + commandgroups []*Group + + // args is actual args parsed from flags. + args []string + // flagErrorBuf contains all error messages from pflag. + flagErrorBuf *bytes.Buffer + // flags is full set of flags. + flags *flag.FlagSet + // pflags contains persistent flags. + pflags *flag.FlagSet + // lflags contains local flags. + lflags *flag.FlagSet + // iflags contains inherited flags. + iflags *flag.FlagSet + // parentsPflags is all persistent flags of cmd's parents. + parentsPflags *flag.FlagSet + // globNormFunc is the global normalization function + // that we can use on every pflag set and children commands + globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName + + // usageFunc is usage func defined by user. + usageFunc func(*Command) error + // usageTemplate is usage template defined by user. + usageTemplate string + // flagErrorFunc is func defined by user and it's called when the parsing of + // flags returns an error. 
+ flagErrorFunc func(*Command, error) error + // helpTemplate is help template defined by user. + helpTemplate string + // helpFunc is help func defined by user. + helpFunc func(*Command, []string) + // helpCommand is command with usage 'help'. If it's not defined by user, + // cobra uses default help command. + helpCommand *Command + // helpCommandGroupID is the group id for the helpCommand + helpCommandGroupID string + + // completionCommandGroupID is the group id for the completion command + completionCommandGroupID string + + // versionTemplate is the version template defined by user. + versionTemplate string + + // inReader is a reader defined by the user that replaces stdin + inReader io.Reader + // outWriter is a writer defined by the user that replaces stdout + outWriter io.Writer + // errWriter is a writer defined by the user that replaces stderr + errWriter io.Writer + + // FParseErrWhitelist flag parse errors to be ignored + FParseErrWhitelist FParseErrWhitelist + + // CompletionOptions is a set of options to control the handling of shell completion + CompletionOptions CompletionOptions + + // commandsAreSorted defines, if command slice are sorted or not. + commandsAreSorted bool + // commandCalledAs is the name or alias value used to call this command. + commandCalledAs struct { + name string + called bool + } + + ctx context.Context + + // commands is the list of commands supported by this program. + commands []*Command + // parent is a parent command for this command. + parent *Command + // Max lengths of commands' string lengths for use in padding. + commandsMaxUseLen int + commandsMaxCommandPathLen int + commandsMaxNameLen int + + // TraverseChildren parses flags on all parents before executing child command. + TraverseChildren bool + + // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. + Hidden bool + + // SilenceErrors is an option to quiet errors down stream. + SilenceErrors bool + + // SilenceUsage is an option to silence usage when an error occurs. + SilenceUsage bool + + // DisableFlagParsing disables the flag parsing. + // If this is true all flags will be passed to the command as arguments. + DisableFlagParsing bool + + // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") + // will be printed by generating docs for this command. + DisableAutoGenTag bool + + // DisableFlagsInUseLine will disable the addition of [flags] to the usage + // line of a command when printing help or generating docs + DisableFlagsInUseLine bool + + // DisableSuggestions disables the suggestions based on Levenshtein distance + // that go along with 'unknown command' messages. + DisableSuggestions bool + + // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. + // Must be > 0. + SuggestionsMinimumDistance int +} + +// Context returns underlying command context. If command was executed +// with ExecuteContext or the context was set with SetContext, the +// previously set context will be returned. Otherwise, nil is returned. +// +// Notice that a call to Execute and ExecuteC will replace a nil context of +// a command with a context.Background, so a background context will be +// returned by Context after one of these functions has been called. +func (c *Command) Context() context.Context { + return c.ctx +} + +// SetContext sets context for the command. This context will be overwritten by +// Command.ExecuteContext or Command.ExecuteContextC. 
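+// A context set here survives Execute, which only installs context.Background
+// when no context is present.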
+func (c *Command) SetContext(ctx context.Context) { + c.ctx = ctx +} + +// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden +// particularly useful when testing. +func (c *Command) SetArgs(a []string) { + c.args = a +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +// Deprecated: Use SetOut and/or SetErr instead +func (c *Command) SetOutput(output io.Writer) { + c.outWriter = output + c.errWriter = output +} + +// SetOut sets the destination for usage messages. +// If newOut is nil, os.Stdout is used. +func (c *Command) SetOut(newOut io.Writer) { + c.outWriter = newOut +} + +// SetErr sets the destination for error messages. +// If newErr is nil, os.Stderr is used. +func (c *Command) SetErr(newErr io.Writer) { + c.errWriter = newErr +} + +// SetIn sets the source for input data +// If newIn is nil, os.Stdin is used. +func (c *Command) SetIn(newIn io.Reader) { + c.inReader = newIn +} + +// SetUsageFunc sets usage function. Usage can be defined by application. +func (c *Command) SetUsageFunc(f func(*Command) error) { + c.usageFunc = f +} + +// SetUsageTemplate sets usage template. Can be defined by Application. +func (c *Command) SetUsageTemplate(s string) { + c.usageTemplate = s +} + +// SetFlagErrorFunc sets a function to generate an error when flag parsing +// fails. +func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) { + c.flagErrorFunc = f +} + +// SetHelpFunc sets help function. Can be defined by Application. +func (c *Command) SetHelpFunc(f func(*Command, []string)) { + c.helpFunc = f +} + +// SetHelpCommand sets help command. +func (c *Command) SetHelpCommand(cmd *Command) { + c.helpCommand = cmd +} + +// SetHelpCommandGroupID sets the group id of the help command. +func (c *Command) SetHelpCommandGroupID(groupID string) { + if c.helpCommand != nil { + c.helpCommand.GroupID = groupID + } + // helpCommandGroupID is used if no helpCommand is defined by the user + c.helpCommandGroupID = groupID +} + +// SetCompletionCommandGroupID sets the group id of the completion command. +func (c *Command) SetCompletionCommandGroupID(groupID string) { + // completionCommandGroupID is used if no completion command is defined by the user + c.Root().completionCommandGroupID = groupID +} + +// SetHelpTemplate sets help template to be used. Application can use it to set custom template. +func (c *Command) SetHelpTemplate(s string) { + c.helpTemplate = s +} + +// SetVersionTemplate sets version template to be used. Application can use it to set custom template. +func (c *Command) SetVersionTemplate(s string) { + c.versionTemplate = s +} + +// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. +// The user should not have a cyclic dependency on commands. +func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { + c.Flags().SetNormalizeFunc(n) + c.PersistentFlags().SetNormalizeFunc(n) + c.globNormFunc = n + + for _, command := range c.commands { + command.SetGlobalNormalizationFunc(n) + } +} + +// OutOrStdout returns output to stdout. 
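+// It returns the writer set with SetOut, falling back to the parent command's
+// output and finally to os.Stdout.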
+func (c *Command) OutOrStdout() io.Writer { + return c.getOut(os.Stdout) +} + +// OutOrStderr returns output to stderr +func (c *Command) OutOrStderr() io.Writer { + return c.getOut(os.Stderr) +} + +// ErrOrStderr returns output to stderr +func (c *Command) ErrOrStderr() io.Writer { + return c.getErr(os.Stderr) +} + +// InOrStdin returns input to stdin +func (c *Command) InOrStdin() io.Reader { + return c.getIn(os.Stdin) +} + +func (c *Command) getOut(def io.Writer) io.Writer { + if c.outWriter != nil { + return c.outWriter + } + if c.HasParent() { + return c.parent.getOut(def) + } + return def +} + +func (c *Command) getErr(def io.Writer) io.Writer { + if c.errWriter != nil { + return c.errWriter + } + if c.HasParent() { + return c.parent.getErr(def) + } + return def +} + +func (c *Command) getIn(def io.Reader) io.Reader { + if c.inReader != nil { + return c.inReader + } + if c.HasParent() { + return c.parent.getIn(def) + } + return def +} + +// UsageFunc returns either the function set by SetUsageFunc for this command +// or a parent, or it returns a default usage function. +func (c *Command) UsageFunc() (f func(*Command) error) { + if c.usageFunc != nil { + return c.usageFunc + } + if c.HasParent() { + return c.Parent().UsageFunc() + } + return func(c *Command) error { + c.mergePersistentFlags() + err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) + if err != nil { + c.PrintErrln(err) + } + return err + } +} + +// Usage puts out the usage for the command. +// Used when a user provides invalid input. +// Can be defined by user by overriding UsageFunc. +func (c *Command) Usage() error { + return c.UsageFunc()(c) +} + +// HelpFunc returns either the function set by SetHelpFunc for this command +// or a parent, or it returns a function with default help behavior. +func (c *Command) HelpFunc() func(*Command, []string) { + if c.helpFunc != nil { + return c.helpFunc + } + if c.HasParent() { + return c.Parent().HelpFunc() + } + return func(c *Command, a []string) { + c.mergePersistentFlags() + // The help should be sent to stdout + // See https://github.com/spf13/cobra/issues/1002 + err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) + if err != nil { + c.PrintErrln(err) + } + } +} + +// Help puts out the help for the command. +// Used when a user calls help [command]. +// Can be defined by user by overriding HelpFunc. +func (c *Command) Help() error { + c.HelpFunc()(c, []string{}) + return nil +} + +// UsageString returns usage string. +func (c *Command) UsageString() string { + // Storing normal writers + tmpOutput := c.outWriter + tmpErr := c.errWriter + + bb := new(bytes.Buffer) + c.outWriter = bb + c.errWriter = bb + + CheckErr(c.Usage()) + + // Setting things back to normal + c.outWriter = tmpOutput + c.errWriter = tmpErr + + return bb.String() +} + +// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this +// command or a parent, or it returns a function which returns the original +// error. +func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { + if c.flagErrorFunc != nil { + return c.flagErrorFunc + } + + if c.HasParent() { + return c.parent.FlagErrorFunc() + } + return func(c *Command, err error) error { + return err + } +} + +var minUsagePadding = 25 + +// UsagePadding return padding for the usage. 
+func (c *Command) UsagePadding() int { + if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { + return minUsagePadding + } + return c.parent.commandsMaxUseLen +} + +var minCommandPathPadding = 11 + +// CommandPathPadding return padding for the command path. +func (c *Command) CommandPathPadding() int { + if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { + return minCommandPathPadding + } + return c.parent.commandsMaxCommandPathLen +} + +var minNamePadding = 11 + +// NamePadding returns padding for the name. +func (c *Command) NamePadding() int { + if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { + return minNamePadding + } + return c.parent.commandsMaxNameLen +} + +// UsageTemplate returns usage template for the command. +func (c *Command) UsageTemplate() string { + if c.usageTemplate != "" { + return c.usageTemplate + } + + if c.HasParent() { + return c.parent.UsageTemplate() + } + return `Usage:{{if .Runnable}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} + +Aliases: + {{.NameAndAliases}}{{end}}{{if .HasExample}} + +Examples: +{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} + +Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} + +{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} + +Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} + +Global Flags: +{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} + +Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} + +Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} +` +} + +// HelpTemplate return help template for the command. +func (c *Command) HelpTemplate() string { + if c.helpTemplate != "" { + return c.helpTemplate + } + + if c.HasParent() { + return c.parent.HelpTemplate() + } + return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} + +{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` +} + +// VersionTemplate return version template for the command. 
+func (c *Command) VersionTemplate() string { + if c.versionTemplate != "" { + return c.versionTemplate + } + + if c.HasParent() { + return c.parent.VersionTemplate() + } + return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +` +} + +func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { + flag := fs.Lookup(name) + if flag == nil { + return false + } + return flag.NoOptDefVal != "" +} + +func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool { + if len(name) == 0 { + return false + } + + flag := fs.ShorthandLookup(name[:1]) + if flag == nil { + return false + } + return flag.NoOptDefVal != "" +} + +func stripFlags(args []string, c *Command) []string { + if len(args) == 0 { + return args + } + c.mergePersistentFlags() + + commands := []string{} + flags := c.Flags() + +Loop: + for len(args) > 0 { + s := args[0] + args = args[1:] + switch { + case s == "--": + // "--" terminates the flags + break Loop + case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): + // If '--flag arg' then + // delete arg from args. + fallthrough // (do the same as below) + case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): + // If '-f arg' then + // delete 'arg' from args or break the loop if len(args) <= 1. + if len(args) <= 1 { + break Loop + } else { + args = args[1:] + continue + } + case s != "" && !strings.HasPrefix(s, "-"): + commands = append(commands, s) + } + } + + return commands +} + +// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like +// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). +// Special care needs to be taken not to remove a flag value. +func (c *Command) argsMinusFirstX(args []string, x string) []string { + if len(args) == 0 { + return args + } + c.mergePersistentFlags() + flags := c.Flags() + +Loop: + for pos := 0; pos < len(args); pos++ { + s := args[pos] + switch { + case s == "--": + // -- means we have reached the end of the parseable args. Break out of the loop now. + break Loop + case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): + fallthrough + case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): + // This is a flag without a default value, and an equal sign is not used. Increment pos in order to skip + // over the next arg, because that is the value of this flag. + pos++ + continue + case !strings.HasPrefix(s, "-"): + // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, + // return the args, excluding the one at this position. + if s == x { + ret := []string{} + ret = append(ret, args[:pos]...) + ret = append(ret, args[pos+1:]...) + return ret + } + } + } + return args +} + +func isFlagArg(arg string) bool { + return ((len(arg) >= 3 && arg[0:2] == "--") || + (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) +} + +// Find the target command given the args and command tree +// Meant to be run on the highest node. Only searches down. 
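+// For example, on a root command with a "get" subcommand,
+// Find([]string{"get", "-o", "wide"}) returns the "get" command and the
+// remaining args ["-o", "wide"] (assuming -o is a flag that takes a value).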
+func (c *Command) Find(args []string) (*Command, []string, error) { + var innerfind func(*Command, []string) (*Command, []string) + + innerfind = func(c *Command, innerArgs []string) (*Command, []string) { + argsWOflags := stripFlags(innerArgs, c) + if len(argsWOflags) == 0 { + return c, innerArgs + } + nextSubCmd := argsWOflags[0] + + cmd := c.findNext(nextSubCmd) + if cmd != nil { + return innerfind(cmd, c.argsMinusFirstX(innerArgs, nextSubCmd)) + } + return c, innerArgs + } + + commandFound, a := innerfind(c, args) + if commandFound.Args == nil { + return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound)) + } + return commandFound, a, nil +} + +func (c *Command) findSuggestions(arg string) string { + if c.DisableSuggestions { + return "" + } + if c.SuggestionsMinimumDistance <= 0 { + c.SuggestionsMinimumDistance = 2 + } + suggestionsString := "" + if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { + suggestionsString += "\n\nDid you mean this?\n" + for _, s := range suggestions { + suggestionsString += fmt.Sprintf("\t%v\n", s) + } + } + return suggestionsString +} + +func (c *Command) findNext(next string) *Command { + matches := make([]*Command, 0) + for _, cmd := range c.commands { + if commandNameMatches(cmd.Name(), next) || cmd.HasAlias(next) { + cmd.commandCalledAs.name = next + return cmd + } + if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) { + matches = append(matches, cmd) + } + } + + if len(matches) == 1 { + return matches[0] + } + + return nil +} + +// Traverse the command tree to find the command, and parse args for +// each parent. +func (c *Command) Traverse(args []string) (*Command, []string, error) { + flags := []string{} + inFlag := false + + for i, arg := range args { + switch { + // A long flag with a space separated value + case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="): + // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' + inFlag = !hasNoOptDefVal(arg[2:], c.Flags()) + flags = append(flags, arg) + continue + // A short flag with a space separated value + case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()): + inFlag = true + flags = append(flags, arg) + continue + // The value for a flag + case inFlag: + inFlag = false + flags = append(flags, arg) + continue + // A flag without a value, or with an `=` separated value + case isFlagArg(arg): + flags = append(flags, arg) + continue + } + + cmd := c.findNext(arg) + if cmd == nil { + return c, args, nil + } + + if err := c.ParseFlags(flags); err != nil { + return nil, args, err + } + return cmd.Traverse(args[i+1:]) + } + return c, args, nil +} + +// SuggestionsFor provides suggestions for the typedName. 
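+// A command is suggested when its name is within SuggestionsMinimumDistance
+// of typedName (by Levenshtein distance), has typedName as a prefix, or lists
+// typedName in its SuggestFor slice.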
+func (c *Command) SuggestionsFor(typedName string) []string { + suggestions := []string{} + for _, cmd := range c.commands { + if cmd.IsAvailableCommand() { + levenshteinDistance := ld(typedName, cmd.Name(), true) + suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance + suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) + if suggestByLevenshtein || suggestByPrefix { + suggestions = append(suggestions, cmd.Name()) + } + for _, explicitSuggestion := range cmd.SuggestFor { + if strings.EqualFold(typedName, explicitSuggestion) { + suggestions = append(suggestions, cmd.Name()) + } + } + } + } + return suggestions +} + +// VisitParents visits all parents of the command and invokes fn on each parent. +func (c *Command) VisitParents(fn func(*Command)) { + if c.HasParent() { + fn(c.Parent()) + c.Parent().VisitParents(fn) + } +} + +// Root finds root command. +func (c *Command) Root() *Command { + if c.HasParent() { + return c.Parent().Root() + } + return c +} + +// ArgsLenAtDash will return the length of c.Flags().Args at the moment +// when a -- was found during args parsing. +func (c *Command) ArgsLenAtDash() int { + return c.Flags().ArgsLenAtDash() +} + +func (c *Command) execute(a []string) (err error) { + if c == nil { + return fmt.Errorf("Called Execute() on a nil Command") + } + + if len(c.Deprecated) > 0 { + c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) + } + + // initialize help and version flag at the last point possible to allow for user + // overriding + c.InitDefaultHelpFlag() + c.InitDefaultVersionFlag() + + err = c.ParseFlags(a) + if err != nil { + return c.FlagErrorFunc()(c, err) + } + + // If help is called, regardless of other flags, return we want help. + // Also say we need help if the command isn't runnable. + helpVal, err := c.Flags().GetBool("help") + if err != nil { + // should be impossible to get here as we always declare a help + // flag in InitDefaultHelpFlag() + c.Println("\"help\" flag declared as non-bool. Please correct your code") + return err + } + + if helpVal { + return flag.ErrHelp + } + + // for back-compat, only add version flag behavior if version is defined + if c.Version != "" { + versionVal, err := c.Flags().GetBool("version") + if err != nil { + c.Println("\"version\" flag declared as non-bool. 
Please correct your code") + return err + } + if versionVal { + err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) + if err != nil { + c.Println(err) + } + return err + } + } + + if !c.Runnable() { + return flag.ErrHelp + } + + c.preRun() + + defer c.postRun() + + argWoFlags := c.Flags().Args() + if c.DisableFlagParsing { + argWoFlags = a + } + + if err := c.ValidateArgs(argWoFlags); err != nil { + return err + } + + for p := c; p != nil; p = p.Parent() { + if p.PersistentPreRunE != nil { + if err := p.PersistentPreRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPreRun != nil { + p.PersistentPreRun(c, argWoFlags) + break + } + } + if c.PreRunE != nil { + if err := c.PreRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PreRun != nil { + c.PreRun(c, argWoFlags) + } + + if err := c.ValidateRequiredFlags(); err != nil { + return err + } + if err := c.ValidateFlagGroups(); err != nil { + return err + } + + if c.RunE != nil { + if err := c.RunE(c, argWoFlags); err != nil { + return err + } + } else { + c.Run(c, argWoFlags) + } + if c.PostRunE != nil { + if err := c.PostRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PostRun != nil { + c.PostRun(c, argWoFlags) + } + for p := c; p != nil; p = p.Parent() { + if p.PersistentPostRunE != nil { + if err := p.PersistentPostRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPostRun != nil { + p.PersistentPostRun(c, argWoFlags) + break + } + } + + return nil +} + +func (c *Command) preRun() { + for _, x := range initializers { + x() + } +} + +func (c *Command) postRun() { + for _, x := range finalizers { + x() + } +} + +// ExecuteContext is the same as Execute(), but sets the ctx on the command. +// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. +func (c *Command) ExecuteContext(ctx context.Context) error { + c.ctx = ctx + return c.Execute() +} + +// Execute uses the args (os.Args[1:] by default) +// and run through the command tree finding appropriate matches +// for commands and then corresponding flags. +func (c *Command) Execute() error { + _, err := c.ExecuteC() + return err +} + +// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command. +// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. +func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) { + c.ctx = ctx + return c.ExecuteC() +} + +// ExecuteC executes the command. 
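+// It returns the command that was actually executed along with any error.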
+func (c *Command) ExecuteC() (cmd *Command, err error) { + if c.ctx == nil { + c.ctx = context.Background() + } + + // Regardless of what command execute is called on, run on Root only + if c.HasParent() { + return c.Root().ExecuteC() + } + + // windows hook + if preExecHookFn != nil { + preExecHookFn(c) + } + + // initialize help at the last point to allow for user overriding + c.InitDefaultHelpCmd() + // initialize completion at the last point to allow for user overriding + c.InitDefaultCompletionCmd() + + // Now that all commands have been created, let's make sure all groups + // are properly created also + c.checkCommandGroups() + + args := c.args + + // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 + if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { + args = os.Args[1:] + } + + // initialize the hidden command to be used for shell completion + c.initCompleteCmd(args) + + var flags []string + if c.TraverseChildren { + cmd, flags, err = c.Traverse(args) + } else { + cmd, flags, err = c.Find(args) + } + if err != nil { + // If found parse to a subcommand and then failed, talk about the subcommand + if cmd != nil { + c = cmd + } + if !c.SilenceErrors { + c.PrintErrln("Error:", err.Error()) + c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath()) + } + return c, err + } + + cmd.commandCalledAs.called = true + if cmd.commandCalledAs.name == "" { + cmd.commandCalledAs.name = cmd.Name() + } + + // We have to pass global context to children command + // if context is present on the parent command. + if cmd.ctx == nil { + cmd.ctx = c.ctx + } + + err = cmd.execute(flags) + if err != nil { + // Always show help if requested, even if SilenceErrors is in + // effect + if errors.Is(err, flag.ErrHelp) { + cmd.HelpFunc()(cmd, args) + return cmd, nil + } + + // If root command has SilenceErrors flagged, + // all subcommands should respect it + if !cmd.SilenceErrors && !c.SilenceErrors { + c.PrintErrln("Error:", err.Error()) + } + + // If root command has SilenceUsage flagged, + // all subcommands should respect it + if !cmd.SilenceUsage && !c.SilenceUsage { + c.Println(cmd.UsageString()) + } + } + return cmd, err +} + +func (c *Command) ValidateArgs(args []string) error { + if c.Args == nil { + return ArbitraryArgs(c, args) + } + return c.Args(c, args) +} + +// ValidateRequiredFlags validates all required flags are present and returns an error otherwise +func (c *Command) ValidateRequiredFlags() error { + if c.DisableFlagParsing { + return nil + } + + flags := c.Flags() + missingFlagNames := []string{} + flags.VisitAll(func(pflag *flag.Flag) { + requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag] + if !found { + return + } + if (requiredAnnotation[0] == "true") && !pflag.Changed { + missingFlagNames = append(missingFlagNames, pflag.Name) + } + }) + + if len(missingFlagNames) > 0 { + return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`)) + } + return nil +} + +// checkCommandGroups checks if a command has been added to a group that does not exists. +// If so, we panic because it indicates a coding error that should be corrected. 
+func (c *Command) checkCommandGroups() {
+	for _, sub := range c.commands {
+		// if the group is not defined, let the developer know right away
+		if sub.GroupID != "" && !c.ContainsGroup(sub.GroupID) {
+			panic(fmt.Sprintf("group id '%s' is not defined for subcommand '%s'", sub.GroupID, sub.CommandPath()))
+		}
+
+		sub.checkCommandGroups()
+	}
+}
+
+// InitDefaultHelpFlag adds the default help flag to c.
+// It is called automatically by executing c or by calling help and usage.
+// If c already has a help flag, it will do nothing.
+func (c *Command) InitDefaultHelpFlag() {
+	c.mergePersistentFlags()
+	if c.Flags().Lookup("help") == nil {
+		usage := "help for "
+		if c.Name() == "" {
+			usage += "this command"
+		} else {
+			usage += c.Name()
+		}
+		c.Flags().BoolP("help", "h", false, usage)
+		_ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"})
+	}
+}
+
+// InitDefaultVersionFlag adds the default version flag to c.
+// It is called automatically by executing c.
+// If c already has a version flag, it will do nothing.
+// If c.Version is empty, it will do nothing.
+func (c *Command) InitDefaultVersionFlag() {
+	if c.Version == "" {
+		return
+	}
+
+	c.mergePersistentFlags()
+	if c.Flags().Lookup("version") == nil {
+		usage := "version for "
+		if c.Name() == "" {
+			usage += "this command"
+		} else {
+			usage += c.Name()
+		}
+		if c.Flags().ShorthandLookup("v") == nil {
+			c.Flags().BoolP("version", "v", false, usage)
+		} else {
+			c.Flags().Bool("version", false, usage)
+		}
+		_ = c.Flags().SetAnnotation("version", FlagSetByCobraAnnotation, []string{"true"})
+	}
+}
+
+// InitDefaultHelpCmd adds the default help command to c.
+// It is called automatically by executing c or by calling help and usage.
+// If c already has a help command, or c has no subcommands, it will do nothing.
+func (c *Command) InitDefaultHelpCmd() {
+	if !c.HasSubCommands() {
+		return
+	}
+
+	if c.helpCommand == nil {
+		c.helpCommand = &Command{
+			Use:   "help [command]",
+			Short: "Help about any command",
+			Long: `Help provides help for any command in the application.
+Simply type ` + c.Name() + ` help [path to command] for full details.`,
+			ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+				var completions []string
+				cmd, _, e := c.Root().Find(args)
+				if e != nil {
+					return nil, ShellCompDirectiveNoFileComp
+				}
+				if cmd == nil {
+					// Root help command.
+					cmd = c.Root()
+				}
+				for _, subCmd := range cmd.Commands() {
+					if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand {
+						if strings.HasPrefix(subCmd.Name(), toComplete) {
+							completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+						}
+					}
+				}
+				return completions, ShellCompDirectiveNoFileComp
+			},
+			Run: func(c *Command, args []string) {
+				cmd, _, e := c.Root().Find(args)
+				if cmd == nil || e != nil {
+					c.Printf("Unknown help topic %#q\n", args)
+					CheckErr(c.Root().Usage())
+				} else {
+					cmd.InitDefaultHelpFlag()    // make the 'help' flag available to be shown
+					cmd.InitDefaultVersionFlag() // make the 'version' flag available to be shown
+					CheckErr(cmd.Help())
+				}
+			},
+			GroupID: c.helpCommandGroupID,
+		}
+	}
+	c.RemoveCommand(c.helpCommand)
+	c.AddCommand(c.helpCommand)
+}
+
+// ResetCommands deletes the parent, subcommands and help command from c.
+func (c *Command) ResetCommands() {
+	c.parent = nil
+	c.commands = nil
+	c.helpCommand = nil
+	c.parentsPflags = nil
+}
+
+// Sorts commands by their names.
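+// (Editor's note) commandSorterByName implements sort.Interface; Commands()
+// applies it via sort.Sort when EnableCommandSorting is true and the slice
+// is not yet sorted.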
+type commandSorterByName []*Command
+
+func (c commandSorterByName) Len() int           { return len(c) }
+func (c commandSorterByName) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
+func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
+
+// Commands returns a sorted slice of child commands.
+func (c *Command) Commands() []*Command {
+	// do not sort commands if they are already sorted or sorting was disabled
+	if EnableCommandSorting && !c.commandsAreSorted {
+		sort.Sort(commandSorterByName(c.commands))
+		c.commandsAreSorted = true
+	}
+	return c.commands
+}
+
+// AddCommand adds one or more commands to this parent command.
+func (c *Command) AddCommand(cmds ...*Command) {
+	for i, x := range cmds {
+		if cmds[i] == c {
+			panic("Command can't be a child of itself")
+		}
+		cmds[i].parent = c
+		// update max lengths
+		usageLen := len(x.Use)
+		if usageLen > c.commandsMaxUseLen {
+			c.commandsMaxUseLen = usageLen
+		}
+		commandPathLen := len(x.CommandPath())
+		if commandPathLen > c.commandsMaxCommandPathLen {
+			c.commandsMaxCommandPathLen = commandPathLen
+		}
+		nameLen := len(x.Name())
+		if nameLen > c.commandsMaxNameLen {
+			c.commandsMaxNameLen = nameLen
+		}
+		// If global normalization function exists, update all children
+		if c.globNormFunc != nil {
+			x.SetGlobalNormalizationFunc(c.globNormFunc)
+		}
+		c.commands = append(c.commands, x)
+		c.commandsAreSorted = false
+	}
+}
+
+// Groups returns a slice of child command groups.
+func (c *Command) Groups() []*Group {
+	return c.commandgroups
+}
+
+// AllChildCommandsHaveGroup reports whether all subcommands are assigned to a group.
+func (c *Command) AllChildCommandsHaveGroup() bool {
+	for _, sub := range c.commands {
+		if (sub.IsAvailableCommand() || sub == c.helpCommand) && sub.GroupID == "" {
+			return false
+		}
+	}
+	return true
+}
+
+// ContainsGroup reports whether groupID exists in the list of command groups.
+func (c *Command) ContainsGroup(groupID string) bool {
+	for _, x := range c.commandgroups {
+		if x.ID == groupID {
+			return true
+		}
+	}
+	return false
+}
+
+// AddGroup adds one or more command groups to this parent command.
+func (c *Command) AddGroup(groups ...*Group) {
+	c.commandgroups = append(c.commandgroups, groups...)
+}
+
+// RemoveCommand removes one or more commands from a parent command.
+func (c *Command) RemoveCommand(cmds ...*Command) {
+	commands := []*Command{}
+main:
+	for _, command := range c.commands {
+		for _, cmd := range cmds {
+			if command == cmd {
+				command.parent = nil
+				continue main
+			}
+		}
+		commands = append(commands, command)
+	}
+	c.commands = commands
+	// recompute all lengths
+	c.commandsMaxUseLen = 0
+	c.commandsMaxCommandPathLen = 0
+	c.commandsMaxNameLen = 0
+	for _, command := range c.commands {
+		usageLen := len(command.Use)
+		if usageLen > c.commandsMaxUseLen {
+			c.commandsMaxUseLen = usageLen
+		}
+		commandPathLen := len(command.CommandPath())
+		if commandPathLen > c.commandsMaxCommandPathLen {
+			c.commandsMaxCommandPathLen = commandPathLen
+		}
+		nameLen := len(command.Name())
+		if nameLen > c.commandsMaxNameLen {
+			c.commandsMaxNameLen = nameLen
+		}
+	}
+}
+
+// Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
+func (c *Command) Print(i ...interface{}) {
+	fmt.Fprint(c.OutOrStderr(), i...)
+}
+
+// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
+func (c *Command) Println(i ...interface{}) { + c.Print(fmt.Sprintln(i...)) +} + +// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. +func (c *Command) Printf(format string, i ...interface{}) { + c.Print(fmt.Sprintf(format, i...)) +} + +// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set. +func (c *Command) PrintErr(i ...interface{}) { + fmt.Fprint(c.ErrOrStderr(), i...) +} + +// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set. +func (c *Command) PrintErrln(i ...interface{}) { + c.PrintErr(fmt.Sprintln(i...)) +} + +// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set. +func (c *Command) PrintErrf(format string, i ...interface{}) { + c.PrintErr(fmt.Sprintf(format, i...)) +} + +// CommandPath returns the full path to this command. +func (c *Command) CommandPath() string { + if c.HasParent() { + return c.Parent().CommandPath() + " " + c.Name() + } + return c.Name() +} + +// UseLine puts out the full usage for a given command (including parents). +func (c *Command) UseLine() string { + var useline string + if c.HasParent() { + useline = c.parent.CommandPath() + " " + c.Use + } else { + useline = c.Use + } + if c.DisableFlagsInUseLine { + return useline + } + if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") { + useline += " [flags]" + } + return useline +} + +// DebugFlags used to determine which flags have been assigned to which commands +// and which persist. +func (c *Command) DebugFlags() { + c.Println("DebugFlags called on", c.Name()) + var debugflags func(*Command) + + debugflags = func(x *Command) { + if x.HasFlags() || x.HasPersistentFlags() { + c.Println(x.Name()) + } + if x.HasFlags() { + x.flags.VisitAll(func(f *flag.Flag) { + if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } + }) + } + if x.HasPersistentFlags() { + x.pflags.VisitAll(func(f *flag.Flag) { + if x.HasFlags() { + if x.flags.Lookup(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + }) + } + c.Println(x.flagErrorBuf) + if x.HasSubCommands() { + for _, y := range x.commands { + debugflags(y) + } + } + } + + debugflags(c) +} + +// Name returns the command's name: the first word in the use line. +func (c *Command) Name() string { + name := c.Use + i := strings.Index(name, " ") + if i >= 0 { + name = name[:i] + } + return name +} + +// HasAlias determines if a given string is an alias of the command. +func (c *Command) HasAlias(s string) bool { + for _, a := range c.Aliases { + if commandNameMatches(a, s) { + return true + } + } + return false +} + +// CalledAs returns the command name or alias that was used to invoke +// this command or an empty string if the command has not been called. 
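+//
+// Illustrative example (editor's note): for a command declared with
+// Use: "serve" and Aliases: []string{"srv"}, invoking the binary as
+// `app srv` makes CalledAs return "srv", while `app serve` returns "serve".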
+func (c *Command) CalledAs() string { + if c.commandCalledAs.called { + return c.commandCalledAs.name + } + return "" +} + +// hasNameOrAliasPrefix returns true if the Name or any of aliases start +// with prefix +func (c *Command) hasNameOrAliasPrefix(prefix string) bool { + if strings.HasPrefix(c.Name(), prefix) { + c.commandCalledAs.name = c.Name() + return true + } + for _, alias := range c.Aliases { + if strings.HasPrefix(alias, prefix) { + c.commandCalledAs.name = alias + return true + } + } + return false +} + +// NameAndAliases returns a list of the command name and all aliases +func (c *Command) NameAndAliases() string { + return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") +} + +// HasExample determines if the command has example. +func (c *Command) HasExample() bool { + return len(c.Example) > 0 +} + +// Runnable determines if the command is itself runnable. +func (c *Command) Runnable() bool { + return c.Run != nil || c.RunE != nil +} + +// HasSubCommands determines if the command has children commands. +func (c *Command) HasSubCommands() bool { + return len(c.commands) > 0 +} + +// IsAvailableCommand determines if a command is available as a non-help command +// (this includes all non deprecated/hidden commands). +func (c *Command) IsAvailableCommand() bool { + if len(c.Deprecated) != 0 || c.Hidden { + return false + } + + if c.HasParent() && c.Parent().helpCommand == c { + return false + } + + if c.Runnable() || c.HasAvailableSubCommands() { + return true + } + + return false +} + +// IsAdditionalHelpTopicCommand determines if a command is an additional +// help topic command; additional help topic command is determined by the +// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that +// are runnable/hidden/deprecated. +// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924. +func (c *Command) IsAdditionalHelpTopicCommand() bool { + // if a command is runnable, deprecated, or hidden it is not a 'help' command + if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { + return false + } + + // if any non-help sub commands are found, the command is not a 'help' command + for _, sub := range c.commands { + if !sub.IsAdditionalHelpTopicCommand() { + return false + } + } + + // the command either has no sub commands, or no non-help sub commands + return true +} + +// HasHelpSubCommands determines if a command has any available 'help' sub commands +// that need to be shown in the usage/help default template under 'additional help +// topics'. +func (c *Command) HasHelpSubCommands() bool { + // return true on the first found available 'help' sub command + for _, sub := range c.commands { + if sub.IsAdditionalHelpTopicCommand() { + return true + } + } + + // the command either has no sub commands, or no available 'help' sub commands + return false +} + +// HasAvailableSubCommands determines if a command has available sub commands that +// need to be shown in the usage/help default template under 'available commands'. +func (c *Command) HasAvailableSubCommands() bool { + // return true on the first found available (non deprecated/help/hidden) + // sub command + for _, sub := range c.commands { + if sub.IsAvailableCommand() { + return true + } + } + + // the command either has no sub commands, or no available (non deprecated/help/hidden) + // sub commands + return false +} + +// HasParent determines if the command is a child command. 
+func (c *Command) HasParent() bool { + return c.parent != nil +} + +// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist. +func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { + return c.globNormFunc +} + +// Flags returns the complete FlagSet that applies +// to this command (local and persistent declared here and by all parents). +func (c *Command) Flags() *flag.FlagSet { + if c.flags == nil { + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.flags.SetOutput(c.flagErrorBuf) + } + + return c.flags +} + +// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. +func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { + persistentFlags := c.PersistentFlags() + + out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.LocalFlags().VisitAll(func(f *flag.Flag) { + if persistentFlags.Lookup(f.Name) == nil { + out.AddFlag(f) + } + }) + return out +} + +// LocalFlags returns the local FlagSet specifically set in the current command. +func (c *Command) LocalFlags() *flag.FlagSet { + c.mergePersistentFlags() + + if c.lflags == nil { + c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.lflags.SetOutput(c.flagErrorBuf) + } + c.lflags.SortFlags = c.Flags().SortFlags + if c.globNormFunc != nil { + c.lflags.SetNormalizeFunc(c.globNormFunc) + } + + addToLocal := func(f *flag.Flag) { + // Add the flag if it is not a parent PFlag, or it shadows a parent PFlag + if c.lflags.Lookup(f.Name) == nil && f != c.parentsPflags.Lookup(f.Name) { + c.lflags.AddFlag(f) + } + } + c.Flags().VisitAll(addToLocal) + c.PersistentFlags().VisitAll(addToLocal) + return c.lflags +} + +// InheritedFlags returns all flags which were inherited from parent commands. +func (c *Command) InheritedFlags() *flag.FlagSet { + c.mergePersistentFlags() + + if c.iflags == nil { + c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.iflags.SetOutput(c.flagErrorBuf) + } + + local := c.LocalFlags() + if c.globNormFunc != nil { + c.iflags.SetNormalizeFunc(c.globNormFunc) + } + + c.parentsPflags.VisitAll(func(f *flag.Flag) { + if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { + c.iflags.AddFlag(f) + } + }) + return c.iflags +} + +// NonInheritedFlags returns all flags which were not inherited from parent commands. +func (c *Command) NonInheritedFlags() *flag.FlagSet { + return c.LocalFlags() +} + +// PersistentFlags returns the persistent FlagSet specifically set in the current command. +func (c *Command) PersistentFlags() *flag.FlagSet { + if c.pflags == nil { + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.pflags.SetOutput(c.flagErrorBuf) + } + return c.pflags +} + +// ResetFlags deletes all flags from command. +func (c *Command) ResetFlags() { + c.flagErrorBuf = new(bytes.Buffer) + c.flagErrorBuf.Reset() + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags.SetOutput(c.flagErrorBuf) + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags.SetOutput(c.flagErrorBuf) + + c.lflags = nil + c.iflags = nil + c.parentsPflags = nil +} + +// HasFlags checks if the command contains any flags (local plus persistent from the entire structure). 
+func (c *Command) HasFlags() bool { + return c.Flags().HasFlags() +} + +// HasPersistentFlags checks if the command contains persistent flags. +func (c *Command) HasPersistentFlags() bool { + return c.PersistentFlags().HasFlags() +} + +// HasLocalFlags checks if the command has flags specifically declared locally. +func (c *Command) HasLocalFlags() bool { + return c.LocalFlags().HasFlags() +} + +// HasInheritedFlags checks if the command has flags inherited from its parent command. +func (c *Command) HasInheritedFlags() bool { + return c.InheritedFlags().HasFlags() +} + +// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire +// structure) which are not hidden or deprecated. +func (c *Command) HasAvailableFlags() bool { + return c.Flags().HasAvailableFlags() +} + +// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated. +func (c *Command) HasAvailablePersistentFlags() bool { + return c.PersistentFlags().HasAvailableFlags() +} + +// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden +// or deprecated. +func (c *Command) HasAvailableLocalFlags() bool { + return c.LocalFlags().HasAvailableFlags() +} + +// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are +// not hidden or deprecated. +func (c *Command) HasAvailableInheritedFlags() bool { + return c.InheritedFlags().HasAvailableFlags() +} + +// Flag climbs up the command tree looking for matching flag. +func (c *Command) Flag(name string) (flag *flag.Flag) { + flag = c.Flags().Lookup(name) + + if flag == nil { + flag = c.persistentFlag(name) + } + + return +} + +// Recursively find matching persistent flag. +func (c *Command) persistentFlag(name string) (flag *flag.Flag) { + if c.HasPersistentFlags() { + flag = c.PersistentFlags().Lookup(name) + } + + if flag == nil { + c.updateParentsPflags() + flag = c.parentsPflags.Lookup(name) + } + return +} + +// ParseFlags parses persistent flag tree and local flags. +func (c *Command) ParseFlags(args []string) error { + if c.DisableFlagParsing { + return nil + } + + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + beforeErrorBufLen := c.flagErrorBuf.Len() + c.mergePersistentFlags() + + // do it here after merging all flags and just before parse + c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) + + err := c.Flags().Parse(args) + // Print warnings if they occurred (e.g. deprecated flag messages). + if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil { + c.Print(c.flagErrorBuf.String()) + } + + return err +} + +// Parent returns a commands parent command. +func (c *Command) Parent() *Command { + return c.parent +} + +// mergePersistentFlags merges c.PersistentFlags() to c.Flags() +// and adds missing persistent flags of all parents. +func (c *Command) mergePersistentFlags() { + c.updateParentsPflags() + c.Flags().AddFlagSet(c.PersistentFlags()) + c.Flags().AddFlagSet(c.parentsPflags) +} + +// updateParentsPflags updates c.parentsPflags by adding +// new persistent flags of all parents. +// If c.parentsPflags == nil, it makes new. 
+func (c *Command) updateParentsPflags() { + if c.parentsPflags == nil { + c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags.SetOutput(c.flagErrorBuf) + c.parentsPflags.SortFlags = false + } + + if c.globNormFunc != nil { + c.parentsPflags.SetNormalizeFunc(c.globNormFunc) + } + + c.Root().PersistentFlags().AddFlagSet(flag.CommandLine) + + c.VisitParents(func(parent *Command) { + c.parentsPflags.AddFlagSet(parent.PersistentFlags()) + }) +} + +// commandNameMatches checks if two command names are equal +// taking into account case sensitivity according to +// EnableCaseInsensitive global configuration. +func commandNameMatches(s string, t string) bool { + if EnableCaseInsensitive { + return strings.EqualFold(s, t) + } + + return s == t +} diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go new file mode 100644 index 000000000..307f0c127 --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -0,0 +1,20 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package cobra + +var preExecHookFn func(*Command) diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go new file mode 100644 index 000000000..adbef395c --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -0,0 +1,41 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows +// +build windows + +package cobra + +import ( + "fmt" + "os" + "time" + + "github.com/inconshreveable/mousetrap" +) + +var preExecHookFn = preExecHook + +func preExecHook(c *Command) { + if MousetrapHelpText != "" && mousetrap.StartedByExplorer() { + c.Print(MousetrapHelpText) + if MousetrapDisplayDuration > 0 { + time.Sleep(MousetrapDisplayDuration) + } else { + c.Println("Press return to continue...") + fmt.Scanln() + } + os.Exit(1) + } +} diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go new file mode 100644 index 000000000..ee38c4d0b --- /dev/null +++ b/vendor/github.com/spf13/cobra/completions.go @@ -0,0 +1,878 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "fmt" + "os" + "strings" + "sync" + + "github.com/spf13/pflag" +) + +const ( + // ShellCompRequestCmd is the name of the hidden command that is used to request + // completion results from the program. It is used by the shell completion scripts. + ShellCompRequestCmd = "__complete" + // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request + // completion results without their description. It is used by the shell completion scripts. + ShellCompNoDescRequestCmd = "__completeNoDesc" +) + +// Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it. +var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} + +// lock for reading and writing from flagCompletionFunctions +var flagCompletionMutex = &sync.RWMutex{} + +// ShellCompDirective is a bit map representing the different behaviors the shell +// can be instructed to have once completions have been provided. +type ShellCompDirective int + +type flagCompError struct { + subCommand string + flagName string +} + +func (e *flagCompError) Error() string { + return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'" +} + +const ( + // ShellCompDirectiveError indicates an error occurred and completions should be ignored. + ShellCompDirectiveError ShellCompDirective = 1 << iota + + // ShellCompDirectiveNoSpace indicates that the shell should not add a space + // after the completion even if there is a single completion provided. + ShellCompDirectiveNoSpace + + // ShellCompDirectiveNoFileComp indicates that the shell should not provide + // file completion even when no completion is provided. + ShellCompDirectiveNoFileComp + + // ShellCompDirectiveFilterFileExt indicates that the provided completions + // should be used as file extension filters. + // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename() + // is a shortcut to using this directive explicitly. The BashCompFilenameExt + // annotation can also be used to obtain the same behavior for flags. + ShellCompDirectiveFilterFileExt + + // ShellCompDirectiveFilterDirs indicates that only directory names should + // be provided in file completion. To request directory names within another + // directory, the returned completions should specify the directory within + // which to search. The BashCompSubdirsInDir annotation can be used to + // obtain the same behavior but only for flags. + ShellCompDirectiveFilterDirs + + // ShellCompDirectiveKeepOrder indicates that the shell should preserve the order + // in which the completions are provided + ShellCompDirectiveKeepOrder + + // =========================================================================== + + // All directives using iota should be above this one. + // For internal use. + shellCompDirectiveMaxValue + + // ShellCompDirectiveDefault indicates to let the shell perform its default + // behavior after completions have been provided. 
+	// This one must be last to avoid messing up the iota count.
+	ShellCompDirectiveDefault ShellCompDirective = 0
+)
+
+const (
+	// Constants for the completion command
+	compCmdName              = "completion"
+	compCmdNoDescFlagName    = "no-descriptions"
+	compCmdNoDescFlagDesc    = "disable completion descriptions"
+	compCmdNoDescFlagDefault = false
+)
+
+// CompletionOptions are the options to control shell completion
+type CompletionOptions struct {
+	// DisableDefaultCmd prevents Cobra from creating a default 'completion' command
+	DisableDefaultCmd bool
+	// DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag
+	// for shells that support completion descriptions
+	DisableNoDescFlag bool
+	// DisableDescriptions turns off all completion descriptions for shells
+	// that support them
+	DisableDescriptions bool
+	// HiddenDefaultCmd makes the default 'completion' command hidden
+	HiddenDefaultCmd bool
+}
+
+// NoFileCompletions can be used to disable file completion for commands that should
+// not trigger file completions.
+func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+	return nil, ShellCompDirectiveNoFileComp
+}
+
+// FixedCompletions can be used to create a completion function which always
+// returns the same results.
+func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+	return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+		return choices, directive
+	}
+}
+
+// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
+func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
+	flag := c.Flag(flagName)
+	if flag == nil {
+		return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
+	}
+	flagCompletionMutex.Lock()
+	defer flagCompletionMutex.Unlock()
+
+	if _, exists := flagCompletionFunctions[flag]; exists {
+		return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
+	}
+	flagCompletionFunctions[flag] = f
+	return nil
+}
+
+// Returns a string listing the different directives enabled in the specified parameter
+func (d ShellCompDirective) string() string {
+	var directives []string
+	if d&ShellCompDirectiveError != 0 {
+		directives = append(directives, "ShellCompDirectiveError")
+	}
+	if d&ShellCompDirectiveNoSpace != 0 {
+		directives = append(directives, "ShellCompDirectiveNoSpace")
+	}
+	if d&ShellCompDirectiveNoFileComp != 0 {
+		directives = append(directives, "ShellCompDirectiveNoFileComp")
+	}
+	if d&ShellCompDirectiveFilterFileExt != 0 {
+		directives = append(directives, "ShellCompDirectiveFilterFileExt")
+	}
+	if d&ShellCompDirectiveFilterDirs != 0 {
+		directives = append(directives, "ShellCompDirectiveFilterDirs")
+	}
+	if d&ShellCompDirectiveKeepOrder != 0 {
+		directives = append(directives, "ShellCompDirectiveKeepOrder")
+	}
+	if len(directives) == 0 {
+		directives = append(directives, "ShellCompDirectiveDefault")
+	}
+
+	if d >= shellCompDirectiveMaxValue {
+		return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d)
+	}
+	return strings.Join(directives, ", ")
+}
+
+// initCompleteCmd adds a special hidden command that can be used to request custom completions.
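+//
+// Editor's sketch of the wire protocol (names are illustrative): a shell
+// script requests completions roughly as
+//
+//	<program> __complete sub-cmd "to"
+//
+// and reads one candidate per line, followed by a final ":<directive>" line,
+// such as ":4" for ShellCompDirectiveNoFileComp.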
+func (c *Command) initCompleteCmd(args []string) {
+	completeCmd := &Command{
+		Use:                   fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
+		Aliases:               []string{ShellCompNoDescRequestCmd},
+		DisableFlagsInUseLine: true,
+		Hidden:                true,
+		DisableFlagParsing:    true,
+		Args:                  MinimumNArgs(1),
+		Short:                 "Request shell completion choices for the specified command-line",
+		Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s",
+			"to request completion choices for the specified command-line.", ShellCompRequestCmd),
+		Run: func(cmd *Command, args []string) {
+			finalCmd, completions, directive, err := cmd.getCompletions(args)
+			if err != nil {
+				CompErrorln(err.Error())
+				// Keep going for multiple reasons:
+				// 1- There could be some valid completions even though there was an error
+				// 2- Even without completions, we need to print the directive
+			}
+
+			noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd)
+			for _, comp := range completions {
+				if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable {
+					// Remove all activeHelp entries in this case
+					if strings.HasPrefix(comp, activeHelpMarker) {
+						continue
+					}
+				}
+				if noDescriptions {
+					// Remove any description that may be included following a tab character.
+					comp = strings.Split(comp, "\t")[0]
+				}
+
+				// Make sure we only write the first line to the output.
+				// This is needed if a description contains a linebreak.
+				// Otherwise the shell scripts will interpret the other lines as new flags
+				// and could therefore provide a wrong completion.
+				comp = strings.Split(comp, "\n")[0]
+
+				// Finally trim the completion. This is especially important to get rid
+				// of a trailing tab when there is no description following it.
+				// For example, a sub-command without a description should not be completed
+				// with a tab at the end (or else zsh will show a -- following it
+				// although there is no description).
+				comp = strings.TrimSpace(comp)
+
+				// Print each possible completion to stdout for the completion script to consume.
+				fmt.Fprintln(finalCmd.OutOrStdout(), comp)
+			}
+
+			// As the last printout, print the completion directive for the completion script to parse.
+			// The directive integer must come last, following a single colon (:).
+			// The completion script expects :<directive>
+			fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive)
+
+			// Print some helpful info to stderr for the user to understand.
+			// Output from stderr must be ignored by the completion script.
+			fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string())
+		},
+	}
+	c.AddCommand(completeCmd)
+	subCmd, _, err := c.Find(args)
+	if err != nil || subCmd.Name() != ShellCompRequestCmd {
+		// Only create this special command if it is actually being called.
+		// This reduces possible side-effects of creating such a command;
+		// for example, having this command would cause problems for a
+		// cobra program that only consists of the root command, since this
+		// command would cause the root command to suddenly have a subcommand.
+ c.RemoveCommand(completeCmd) + } +} + +func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { + // The last argument, which is not completely typed by the user, + // should not be part of the list of arguments + toComplete := args[len(args)-1] + trimmedArgs := args[:len(args)-1] + + var finalCmd *Command + var finalArgs []string + var err error + // Find the real command for which completion must be performed + // check if we need to traverse here to parse local flags on parent commands + if c.Root().TraverseChildren { + finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs) + } else { + // For Root commands that don't specify any value for their Args fields, when we call + // Find(), if those Root commands don't have any sub-commands, they will accept arguments. + // However, because we have added the __complete sub-command in the current code path, the + // call to Find() -> legacyArgs() will return an error if there are any arguments. + // To avoid this, we first remove the __complete command to get back to having no sub-commands. + rootCmd := c.Root() + if len(rootCmd.Commands()) == 1 { + rootCmd.RemoveCommand(c) + } + + finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs) + } + if err != nil { + // Unable to find the real command. E.g., someInvalidCmd + return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) + } + finalCmd.ctx = c.ctx + + // These flags are normally added when `execute()` is called on `finalCmd`, + // however, when doing completion, we don't call `finalCmd.execute()`. + // Let's add the --help and --version flag ourselves. + finalCmd.InitDefaultHelpFlag() + finalCmd.InitDefaultVersionFlag() + + // Check if we are doing flag value completion before parsing the flags. + // This is important because if we are completing a flag value, we need to also + // remove the flag name argument from the list of finalArgs or else the parsing + // could fail due to an invalid value (incomplete) for the flag. + flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete) + + // Check if interspersed is false or -- was set on a previous arg. + // This works by counting the arguments. Normally -- is not counted as arg but + // if -- was already set or interspersed is false and there is already one arg then + // the extra added -- is counted as arg. + flagCompletion := true + _ = finalCmd.ParseFlags(append(finalArgs, "--")) + newArgCount := finalCmd.Flags().NArg() + + // Parse the flags early so we can check if required flags are set + if err = finalCmd.ParseFlags(finalArgs); err != nil { + return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) + } + + realArgCount := finalCmd.Flags().NArg() + if newArgCount > realArgCount { + // don't do flag completion (see above) + flagCompletion = false + } + // Error while attempting to parse flags + if flagErr != nil { + // If error type is flagCompError and we don't want flagCompletion we should ignore the error + if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { + return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr + } + } + + // Look for the --help or --version flags. If they are present, + // there should be no further completions. 
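+	// For example (editor's note): completing `app sub --help <TAB>` should
+	// suggest nothing further, since --help terminates the useful command line.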
+ if helpOrVersionFlagPresent(finalCmd) { + return finalCmd, []string{}, ShellCompDirectiveNoFileComp, nil + } + + // We only remove the flags from the arguments if DisableFlagParsing is not set. + // This is important for commands which have requested to do their own flag completion. + if !finalCmd.DisableFlagParsing { + finalArgs = finalCmd.Flags().Args() + } + + if flag != nil && flagCompletion { + // Check if we are completing a flag value subject to annotations + if validExts, present := flag.Annotations[BashCompFilenameExt]; present { + if len(validExts) != 0 { + // File completion filtered by extensions + return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil + } + + // The annotation requests simple file completion. There is no reason to do + // that since it is the default behavior anyway. Let's ignore this annotation + // in case the program also registered a completion function for this flag. + // Even though it is a mistake on the program's side, let's be nice when we can. + } + + if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present { + if len(subDir) == 1 { + // Directory completion from within a directory + return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil + } + // Directory completion + return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil + } + } + + var completions []string + var directive ShellCompDirective + + // Enforce flag groups before doing flag completions + finalCmd.enforceFlagGroupsForCompletion() + + // Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true; + // doing this allows for completion of persistent flag names even for commands that disable flag parsing. + // + // When doing completion of a flag name, as soon as an argument starts with + // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires + // the flag name to be complete + if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion { + // First check for required flags + completions = completeRequireFlags(finalCmd, toComplete) + + // If we have not found any required flags, only then can we show regular flags + if len(completions) == 0 { + doCompleteFlags := func(flag *pflag.Flag) { + if !flag.Changed || + strings.Contains(flag.Value.Type(), "Slice") || + strings.Contains(flag.Value.Type(), "Array") { + // If the flag is not already present, or if it can be specified multiple times (Array or Slice) + // we suggest it as a completion + completions = append(completions, getFlagNameCompletions(flag, toComplete)...) + } + } + + // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands + // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and + // non-inherited flags. + finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteFlags(flag) + }) + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteFlags(flag) + }) + } + + directive = ShellCompDirectiveNoFileComp + if len(completions) == 1 && strings.HasSuffix(completions[0], "=") { + // If there is a single completion, the shell usually adds a space + // after the completion. We don't want that if the flag ends with an = + directive = ShellCompDirectiveNoSpace + } + + if !finalCmd.DisableFlagParsing { + // If DisableFlagParsing==false, we have completed the flags as known by Cobra; + // we can return what we found. 
+ // If DisableFlagParsing==true, Cobra may not be aware of all flags, so we + // let the logic continue to see if ValidArgsFunction needs to be called. + return finalCmd, completions, directive, nil + } + } else { + directive = ShellCompDirectiveDefault + if flag == nil { + foundLocalNonPersistentFlag := false + // If TraverseChildren is true on the root command we don't check for + // local flags because we can use a local flag on a parent command + if !finalCmd.Root().TraverseChildren { + // Check if there are any local, non-persistent flags on the command-line + localNonPersistentFlags := finalCmd.LocalNonPersistentFlags() + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed { + foundLocalNonPersistentFlag = true + } + }) + } + + // Complete subcommand names, including the help command + if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { + // We only complete sub-commands if: + // - there are no arguments on the command-line and + // - there are no local, non-persistent flags on the command-line or TraverseChildren is true + for _, subCmd := range finalCmd.Commands() { + if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { + if strings.HasPrefix(subCmd.Name(), toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + } + directive = ShellCompDirectiveNoFileComp + } + } + } + + // Complete required flags even without the '-' prefix + completions = append(completions, completeRequireFlags(finalCmd, toComplete)...) + + // Always complete ValidArgs, even if we are completing a subcommand name. + // This is for commands that have both subcommands and ValidArgs. + if len(finalCmd.ValidArgs) > 0 { + if len(finalArgs) == 0 { + // ValidArgs are only for the first argument + for _, validArg := range finalCmd.ValidArgs { + if strings.HasPrefix(validArg, toComplete) { + completions = append(completions, validArg) + } + } + directive = ShellCompDirectiveNoFileComp + + // If no completions were found within commands or ValidArgs, + // see if there are any ArgAliases that should be completed. + if len(completions) == 0 { + for _, argAlias := range finalCmd.ArgAliases { + if strings.HasPrefix(argAlias, toComplete) { + completions = append(completions, argAlias) + } + } + } + } + + // If there are ValidArgs specified (even if they don't match), we stop completion. + // Only one of ValidArgs or ValidArgsFunction can be used for a single command. + return finalCmd, completions, directive, nil + } + + // Let the logic continue so as to add any ValidArgsFunction completions, + // even if we already found sub-commands. + // This is for commands that have subcommands but also specify a ValidArgsFunction. + } + } + + // Find the completion function for the flag or command + var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + if flag != nil && flagCompletion { + flagCompletionMutex.RLock() + completionFn = flagCompletionFunctions[flag] + flagCompletionMutex.RUnlock() + } else { + completionFn = finalCmd.ValidArgsFunction + } + if completionFn != nil { + // Go custom completion defined for this flag or command. + // Call the registered completion function to get the completions. + var comps []string + comps, directive = completionFn(finalCmd, finalArgs, toComplete) + completions = append(completions, comps...) 
+ } + + return finalCmd, completions, directive, nil +} + +func helpOrVersionFlagPresent(cmd *Command) bool { + if versionFlag := cmd.Flags().Lookup("version"); versionFlag != nil && + len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed { + return true + } + if helpFlag := cmd.Flags().Lookup("help"); helpFlag != nil && + len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed { + return true + } + return false +} + +func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { + if nonCompletableFlag(flag) { + return []string{} + } + + var completions []string + flagName := "--" + flag.Name + if strings.HasPrefix(flagName, toComplete) { + // Flag without the = + completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + + // Why suggest both long forms: --flag and --flag= ? + // This forces the user to *always* have to type either an = or a space after the flag name. + // Let's be nice and avoid making users have to do that. + // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it. + // The = form will still work, we just won't suggest it. + // This also makes the list of suggested flags shorter as we avoid all the = forms. + // + // if len(flag.NoOptDefVal) == 0 { + // // Flag requires a value, so it can be suffixed with = + // flagName += "=" + // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + // } + } + + flagName = "-" + flag.Shorthand + if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + } + + return completions +} + +func completeRequireFlags(finalCmd *Command, toComplete string) []string { + var completions []string + + doCompleteRequiredFlags := func(flag *pflag.Flag) { + if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { + if !flag.Changed { + // If the flag is not already present, we suggest it as a completion + completions = append(completions, getFlagNameCompletions(flag, toComplete)...) + } + } + } + + // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands + // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and + // non-inherited flags. + finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteRequiredFlags(flag) + }) + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteRequiredFlags(flag) + }) + + return completions +} + +func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) { + if finalCmd.DisableFlagParsing { + // We only do flag completion if we are allowed to parse flags + // This is important for commands which have requested to do their own flag completion. + return nil, args, lastArg, nil + } + + var flagName string + trimmedArgs := args + flagWithEqual := false + orgLastArg := lastArg + + // When doing completion of a flag name, as soon as an argument starts with + // a '-' we know it is a flag. We cannot use isFlagArg() here as that function + // requires the flag name to be complete + if len(lastArg) > 0 && lastArg[0] == '-' { + if index := strings.Index(lastArg, "="); index >= 0 { + // Flag with an = + if strings.HasPrefix(lastArg[:index], "--") { + // Flag has full name + flagName = lastArg[2:index] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. 
`-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = lastArg[index-1 : index] + } + lastArg = lastArg[index+1:] + flagWithEqual = true + } else { + // Normal flag completion + return nil, args, lastArg, nil + } + } + + if len(flagName) == 0 { + if len(args) > 0 { + prevArg := args[len(args)-1] + if isFlagArg(prevArg) { + // Only consider the case where the flag does not contain an =. + // If the flag contains an = it means it has already been fully processed, + // so we don't need to deal with it here. + if index := strings.Index(prevArg, "="); index < 0 { + if strings.HasPrefix(prevArg, "--") { + // Flag has full name + flagName = prevArg[2:] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. `-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = prevArg[len(prevArg)-1:] + } + // Remove the uncompleted flag or else there could be an error created + // for an invalid value for that flag + trimmedArgs = args[:len(args)-1] + } + } + } + } + + if len(flagName) == 0 { + // Not doing flag completion + return nil, trimmedArgs, lastArg, nil + } + + flag := findFlag(finalCmd, flagName) + if flag == nil { + // Flag not supported by this command, the interspersed option might be set so return the original args + return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName} + } + + if !flagWithEqual { + if len(flag.NoOptDefVal) != 0 { + // We had assumed dealing with a two-word flag but the flag is a boolean flag. + // In that case, there is no value following it, so we are not really doing flag completion. + // Reset everything to do noun completion. + trimmedArgs = args + flag = nil + } + } + + return flag, trimmedArgs, lastArg, nil +} + +// InitDefaultCompletionCmd adds a default 'completion' command to c. +// This function will do nothing if any of the following is true: +// 1- the feature has been explicitly disabled by the program, +// 2- c has no subcommands (to avoid creating one), +// 3- c already has a 'completion' command provided by the program. +func (c *Command) InitDefaultCompletionCmd() { + if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { + return + } + + for _, cmd := range c.commands { + if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) { + // A completion command is already available + return + } + } + + haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions + + completionCmd := &Command{ + Use: compCmdName, + Short: "Generate the autocompletion script for the specified shell", + Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell. +See each sub-command's help for details on how to use the generated script. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + Hidden: c.CompletionOptions.HiddenDefaultCmd, + GroupID: c.completionCommandGroupID, + } + c.AddCommand(completionCmd) + + out := c.OutOrStdout() + noDesc := c.CompletionOptions.DisableDescriptions + shortDesc := "Generate the autocompletion script for %s" + bash := &Command{ + Use: "bash", + Short: fmt.Sprintf(shortDesc, "bash"), + Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell. + +This script depends on the 'bash-completion' package. +If it is not installed already, you can install it via your OS's package manager. 
+ +To load completions in your current shell session: + + source <(%[1]s completion bash) + +To load completions for every new session, execute once: + +#### Linux: + + %[1]s completion bash > /etc/bash_completion.d/%[1]s + +#### macOS: + + %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + DisableFlagsInUseLine: true, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenBashCompletionV2(out, !noDesc) + }, + } + if haveNoDescFlag { + bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + zsh := &Command{ + Use: "zsh", + Short: fmt.Sprintf(shortDesc, "zsh"), + Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell. + +If shell completion is not already enabled in your environment you will need +to enable it. You can execute the following once: + + echo "autoload -U compinit; compinit" >> ~/.zshrc + +To load completions in your current shell session: + + source <(%[1]s completion zsh) + +To load completions for every new session, execute once: + +#### Linux: + + %[1]s completion zsh > "${fpath[1]}/_%[1]s" + +#### macOS: + + %[1]s completion zsh > $(brew --prefix)/share/zsh/site-functions/_%[1]s + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenZshCompletionNoDesc(out) + } + return cmd.Root().GenZshCompletion(out) + }, + } + if haveNoDescFlag { + zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + fish := &Command{ + Use: "fish", + Short: fmt.Sprintf(shortDesc, "fish"), + Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell. + +To load completions in your current shell session: + + %[1]s completion fish | source + +To load completions for every new session, execute once: + + %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenFishCompletion(out, !noDesc) + }, + } + if haveNoDescFlag { + fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + powershell := &Command{ + Use: "powershell", + Short: fmt.Sprintf(shortDesc, "powershell"), + Long: fmt.Sprintf(`Generate the autocompletion script for powershell. + +To load completions in your current shell session: + + %[1]s completion powershell | Out-String | Invoke-Expression + +To load completions for every new session, add the output of the above command +to your powershell profile. 
+`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenPowerShellCompletion(out) + } + return cmd.Root().GenPowerShellCompletionWithDesc(out) + + }, + } + if haveNoDescFlag { + powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + completionCmd.AddCommand(bash, zsh, fish, powershell) +} + +func findFlag(cmd *Command, name string) *pflag.Flag { + flagSet := cmd.Flags() + if len(name) == 1 { + // First convert the short flag into a long flag + // as the cmd.Flag() search only accepts long flags + if short := flagSet.ShorthandLookup(name); short != nil { + name = short.Name + } else { + set := cmd.InheritedFlags() + if short = set.ShorthandLookup(name); short != nil { + name = short.Name + } else { + return nil + } + } + } + return cmd.Flag(name) +} + +// CompDebug prints the specified string to the same file as where the +// completion script prints its logs. +// Note that completion printouts should never be on stdout as they would +// be wrongly interpreted as actual completion choices by the completion script. +func CompDebug(msg string, printToStdErr bool) { + msg = fmt.Sprintf("[Debug] %s", msg) + + // Such logs are only printed when the user has set the environment + // variable BASH_COMP_DEBUG_FILE to the path of some file to be used. + if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" { + f, err := os.OpenFile(path, + os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err == nil { + defer f.Close() + WriteStringAndCheck(f, msg) + } + } + + if printToStdErr { + // Must print to stderr for this not to be read by the completion script. + fmt.Fprint(os.Stderr, msg) + } +} + +// CompDebugln prints the specified string with a newline at the end +// to the same file as where the completion script prints its logs. +// Such logs are only printed when the user has set the environment +// variable BASH_COMP_DEBUG_FILE to the path of some file to be used. +func CompDebugln(msg string, printToStdErr bool) { + CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr) +} + +// CompError prints the specified completion message to stderr. +func CompError(msg string) { + msg = fmt.Sprintf("[Error] %s", msg) + CompDebug(msg, true) +} + +// CompErrorln prints the specified completion message to stderr with a newline at the end. +func CompErrorln(msg string) { + CompError(fmt.Sprintf("%s\n", msg)) +} diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go new file mode 100644 index 000000000..12ca0d2b1 --- /dev/null +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -0,0 +1,292 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
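+
+// Editor's note (illustrative usage, not upstream code): the fish completion
+// script generated below is normally emitted through the public API, e.g.
+//
+//	err := rootCmd.GenFishCompletion(os.Stdout, true) // true => include descriptions
+//
+// where rootCmd is an assumed *cobra.Command.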
+ +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" +) + +func genFishComp(buf io.StringWriter, name string, includeDesc bool) { + // Variables should not contain a '-' or ':' character + nameForVar := name + nameForVar = strings.ReplaceAll(nameForVar, "-", "_") + nameForVar = strings.ReplaceAll(nameForVar, ":", "_") + + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(` +function __%[1]s_debug + set -l file "$BASH_COMP_DEBUG_FILE" + if test -n "$file" + echo "$argv" >> $file + end +end + +function __%[1]s_perform_completion + __%[1]s_debug "Starting __%[1]s_perform_completion" + + # Extract all args except the last one + set -l args (commandline -opc) + # Extract the last arg and escape it in case it is a space + set -l lastArg (string escape -- (commandline -ct)) + + __%[1]s_debug "args: $args" + __%[1]s_debug "last arg: $lastArg" + + # Disable ActiveHelp which is not supported for fish shell + set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg" + + __%[1]s_debug "Calling $requestComp" + set -l results (eval $requestComp 2> /dev/null) + + # Some programs may output extra empty lines after the directive. + # Let's ignore them or else it will break completion. + # Ref: https://github.com/spf13/cobra/issues/1279 + for line in $results[-1..1] + if test (string trim -- $line) = "" + # Found an empty line, remove it + set results $results[1..-2] + else + # Found non-empty line, we have our proper output + break + end + end + + set -l comps $results[1..-2] + set -l directiveLine $results[-1] + + # For Fish, when completing a flag with an = (e.g., -n=) + # completions must be prefixed with the flag + set -l flagPrefix (string match -r -- '-.*=' "$lastArg") + + __%[1]s_debug "Comps: $comps" + __%[1]s_debug "DirectiveLine: $directiveLine" + __%[1]s_debug "flagPrefix: $flagPrefix" + + for comp in $comps + printf "%%s%%s\n" "$flagPrefix" "$comp" + end + + printf "%%s\n" "$directiveLine" +end + +# this function limits calls to __%[1]s_perform_completion, by caching the result behind $__%[1]s_perform_completion_once_result +function __%[1]s_perform_completion_once + __%[1]s_debug "Starting __%[1]s_perform_completion_once" + + if test -n "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion" + return 0 + end + + set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion) + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "No completions, probably due to a failure" + return 1 + end + + __%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result" + return 0 +end + +# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run +function __%[1]s_clear_perform_completion_once_result + __%[1]s_debug "" + __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable ==========" + set --erase __%[1]s_perform_completion_once_result + __%[1]s_debug "Succesfully erased the variable __%[1]s_perform_completion_once_result" +end + +function __%[1]s_requires_order_preservation + __%[1]s_debug "" + __%[1]s_debug "========= checking if order preservation is required ==========" + + __%[1]s_perform_completion_once + if test -z "$__%[1]s_perform_completion_once_result" + 
__%[1]s_debug "Error determining if order preservation is required" + return 1 + end + + set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) + __%[1]s_debug "Directive is: $directive" + + set -l shellCompDirectiveKeepOrder %[9]d + set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2) + __%[1]s_debug "Keeporder is: $keeporder" + + if test $keeporder -ne 0 + __%[1]s_debug "This does require order preservation" + return 0 + end + + __%[1]s_debug "This doesn't require order preservation" + return 1 +end + + +# This function does two things: +# - Obtain the completions and store them in the global __%[1]s_comp_results +# - Return false if file completion should be performed +function __%[1]s_prepare_completions + __%[1]s_debug "" + __%[1]s_debug "========= starting completion logic ==========" + + # Start fresh + set --erase __%[1]s_comp_results + + __%[1]s_perform_completion_once + __%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result" + + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "No completion, probably due to a failure" + # Might as well do file completion, in case it helps + return 1 + end + + set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) + set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2] + + __%[1]s_debug "Completions are: $__%[1]s_comp_results" + __%[1]s_debug "Directive is: $directive" + + set -l shellCompDirectiveError %[4]d + set -l shellCompDirectiveNoSpace %[5]d + set -l shellCompDirectiveNoFileComp %[6]d + set -l shellCompDirectiveFilterFileExt %[7]d + set -l shellCompDirectiveFilterDirs %[8]d + + if test -z "$directive" + set directive 0 + end + + set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2) + if test $compErr -eq 1 + __%[1]s_debug "Received error directive: aborting." + # Might as well do file completion, in case it helps + return 1 + end + + set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2) + set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2) + if test $filefilter -eq 1; or test $dirfilter -eq 1 + __%[1]s_debug "File extension filtering or directory filtering not supported" + # Do full file completion instead + return 1 + end + + set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2) + set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2) + + __%[1]s_debug "nospace: $nospace, nofiles: $nofiles" + + # If we want to prevent a space, or if file completion is NOT disabled, + # we need to count the number of valid completions. + # To do so, we will filter on prefix as the completions we have received + # may not already be filtered so as to allow fish to match on different + # criteria than the prefix. 
+ if test $nospace -ne 0; or test $nofiles -eq 0 + set -l prefix (commandline -t | string escape --style=regex) + __%[1]s_debug "prefix: $prefix" + + set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results) + set --global __%[1]s_comp_results $completions + __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results" + + # Important not to quote the variable for count to work + set -l numComps (count $__%[1]s_comp_results) + __%[1]s_debug "numComps: $numComps" + + if test $numComps -eq 1; and test $nospace -ne 0 + # We must first split on \t to get rid of the descriptions to be + # able to check what the actual completion will be. + # We don't need descriptions anyway since there is only a single + # real completion which the shell will expand immediately. + set -l split (string split --max 1 \t $__%[1]s_comp_results[1]) + + # Fish won't add a space if the completion ends with any + # of the following characters: @=/:., + set -l lastChar (string sub -s -1 -- $split) + if not string match -r -q "[@=/:.,]" -- "$lastChar" + # In other cases, to support the "nospace" directive we trick the shell + # by outputting an extra, longer completion. + __%[1]s_debug "Adding second completion to perform nospace directive" + set --global __%[1]s_comp_results $split[1] $split[1]. + __%[1]s_debug "Completions are now: $__%[1]s_comp_results" + end + end + + if test $numComps -eq 0; and test $nofiles -eq 0 + # To be consistent with bash and zsh, we only trigger file + # completion when there are no other completions + __%[1]s_debug "Requesting file completion" + return 1 + end + end + + return 0 +end + +# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves +# so we can properly delete any completions provided by another script. +# Only do this if the program can be found, or else fish may print some errors; besides, +# the existing completions will only be loaded if the program can be found. +if type -q "%[2]s" + # The space after the program name is essential to trigger completion for the program + # and not completion of the program name itself. + # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. + complete --do-complete "%[2]s " > /dev/null 2>&1 +end + +# Remove any pre-existing completions for the program since we will be handling all of them. +complete -c %[2]s -e + +# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global +complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result' +# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results +# which provides the program's completion choices. +# If this doesn't require order preservation, we don't use the -k flag +complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' +# otherwise we use the -k flag +complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' +`, nameForVar, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) +} + +// GenFishCompletion generates fish completion file and writes to the passed writer. 
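+// Completions are annotated with descriptions when includeDesc is true.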
+func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error {
+	buf := new(bytes.Buffer)
+	genFishComp(buf, c.Name(), includeDesc)
+	_, err := buf.WriteTo(w)
+	return err
+}
+
+// GenFishCompletionFile generates fish completion file.
+func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error {
+	outFile, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer outFile.Close()
+
+	return c.GenFishCompletion(outFile, includeDesc)
+}
diff --git a/vendor/github.com/spf13/cobra/fish_completions.md b/vendor/github.com/spf13/cobra/fish_completions.md
new file mode 100644
index 000000000..19b2ed129
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/fish_completions.md
@@ -0,0 +1,4 @@
+## Generating Fish Completions For Your cobra.Command
+
+Please refer to [Shell Completions](shell_completions.md) for details.
+
diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go
new file mode 100644
index 000000000..b35fde155
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/flag_groups.go
@@ -0,0 +1,224 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	flag "github.com/spf13/pflag"
+)
+
+const (
+	requiredAsGroup   = "cobra_annotation_required_if_others_set"
+	mutuallyExclusive = "cobra_annotation_mutually_exclusive"
+)
+
+// MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors
+// if the command is invoked with a subset (but not all) of the given flags.
+func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) {
+	c.mergePersistentFlags()
+	for _, v := range flagNames {
+		f := c.Flags().Lookup(v)
+		if f == nil {
+			panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v))
+		}
+		if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil {
+			// Only errs if the flag isn't found.
+			panic(err)
+		}
+	}
+}
+
+// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors
+// if the command is invoked with more than one flag from the given set of flags.
+func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) {
+	c.mergePersistentFlags()
+	for _, v := range flagNames {
+		f := c.Flags().Lookup(v)
+		if f == nil {
+			panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v))
+		}
+		// Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed.
+		if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil {
+			panic(err)
+		}
+	}
+}
+
+// ValidateFlagGroups validates the mutuallyExclusive/requiredAsGroup logic and returns the
+// first error encountered.
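+// Each group is identified by the space-separated list of its flag names, which is
+// how MarkFlagsRequiredTogether and MarkFlagsMutuallyExclusive record groups in
+// flag annotations.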
+func (c *Command) ValidateFlagGroups() error {
+	if c.DisableFlagParsing {
+		return nil
+	}
+
+	flags := c.Flags()
+
+	// groupStatus format is the list of flags as a unique ID,
+	// then a map of each flag name and whether it is set or not.
+	groupStatus := map[string]map[string]bool{}
+	mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+	flags.VisitAll(func(pflag *flag.Flag) {
+		processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus)
+		processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus)
+	})
+
+	if err := validateRequiredFlagGroups(groupStatus); err != nil {
+		return err
+	}
+	if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil {
+		return err
+	}
+	return nil
+}
+
+func hasAllFlags(fs *flag.FlagSet, flagnames ...string) bool {
+	for _, fname := range flagnames {
+		f := fs.Lookup(fname)
+		if f == nil {
+			return false
+		}
+	}
+	return true
+}
+
+func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annotation string, groupStatus map[string]map[string]bool) {
+	groupInfo, found := pflag.Annotations[annotation]
+	if found {
+		for _, group := range groupInfo {
+			if groupStatus[group] == nil {
+				flagnames := strings.Split(group, " ")
+
+				// Only consider this flag group at all if all the flags are defined.
+				if !hasAllFlags(flags, flagnames...) {
+					continue
+				}
+
+				groupStatus[group] = map[string]bool{}
+				for _, name := range flagnames {
+					groupStatus[group][name] = false
+				}
+			}
+
+			groupStatus[group][pflag.Name] = pflag.Changed
+		}
+	}
+}
+
+func validateRequiredFlagGroups(data map[string]map[string]bool) error {
+	keys := sortedKeys(data)
+	for _, flagList := range keys {
+		flagnameAndStatus := data[flagList]
+
+		unset := []string{}
+		for flagname, isSet := range flagnameAndStatus {
+			if !isSet {
+				unset = append(unset, flagname)
+			}
+		}
+		if len(unset) == len(flagnameAndStatus) || len(unset) == 0 {
+			continue
+		}
+
+		// Sort values, so they can be tested/scripted against consistently.
+		sort.Strings(unset)
+		return fmt.Errorf("if any flags in the group [%v] are set they must all be set; missing %v", flagList, unset)
+	}
+
+	return nil
+}
+
+func validateExclusiveFlagGroups(data map[string]map[string]bool) error {
+	keys := sortedKeys(data)
+	for _, flagList := range keys {
+		flagnameAndStatus := data[flagList]
+		var set []string
+		for flagname, isSet := range flagnameAndStatus {
+			if isSet {
+				set = append(set, flagname)
+			}
+		}
+		if len(set) == 0 || len(set) == 1 {
+			continue
+		}
+
+		// Sort values, so they can be tested/scripted against consistently.
+		sort.Strings(set)
+		return fmt.Errorf("if any flags in the group [%v] are set none of the others can be; %v were all set", flagList, set)
+	}
+	return nil
+}
+
+func sortedKeys(m map[string]map[string]bool) []string {
+	keys := make([]string, len(m))
+	i := 0
+	for k := range m {
+		keys[i] = k
+		i++
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+// enforceFlagGroupsForCompletion will do the following:
+// - when a flag in a group is present, other flags in the group will be marked required
+// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden
+// This allows the standard completion logic to behave appropriately for flag groups
+func (c *Command) enforceFlagGroupsForCompletion() {
+	if c.DisableFlagParsing {
+		return
+	}
+
+	flags := c.Flags()
+	groupStatus := map[string]map[string]bool{}
+	mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+	c.Flags().VisitAll(func(pflag *flag.Flag) {
+		processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus)
+		processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus)
+	})
+
+	// If a flag that is part of a group is present, we make all the other flags
+	// of that group required so that the shell completion suggests them automatically
+	for flagList, flagnameAndStatus := range groupStatus {
+		for _, isSet := range flagnameAndStatus {
+			if isSet {
+				// One of the flags of the group is set, mark the other ones as required
+				for _, fName := range strings.Split(flagList, " ") {
+					_ = c.MarkFlagRequired(fName)
+				}
+			}
+		}
+	}
+
+	// If a flag that is mutually exclusive to others is present, we hide the other
+	// flags of that group so the shell completion does not suggest them
+	for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus {
+		for flagName, isSet := range flagnameAndStatus {
+			if isSet {
+				// One of the flags of the mutually exclusive group is set, mark the other ones as hidden
+				// Don't mark the flag that is already set as hidden because it may be an
+				// array or slice flag and therefore must continue being suggested
+				for _, fName := range strings.Split(flagList, " ") {
+					if fName != flagName {
+						flag := c.Flags().Lookup(fName)
+						flag.Hidden = true
+					}
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
new file mode 100644
index 000000000..177d2755f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -0,0 +1,325 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The generated scripts require PowerShell v5.0+ (which comes with Windows 10, but
+// can be downloaded separately for Windows 7 or 8.1).
+ +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" +) + +func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { + // Variables should not contain a '-' or ':' character + nameForVar := name + nameForVar = strings.Replace(nameForVar, "-", "_", -1) + nameForVar = strings.Replace(nameForVar, ":", "_", -1) + + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*- + +function __%[1]s_debug { + if ($env:BASH_COMP_DEBUG_FILE) { + "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE" + } +} + +filter __%[1]s_escapeStringWithSpecialChars { +`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` +} + +[scriptblock]$__%[2]sCompleterBlock = { + param( + $WordToComplete, + $CommandAst, + $CursorPosition + ) + + # Get the current command line and convert into a string + $Command = $CommandAst.CommandElements + $Command = "$Command" + + __%[1]s_debug "" + __%[1]s_debug "========= starting completion logic ==========" + __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $CursorPosition location, so we need + # to truncate the command-line ($Command) up to the $CursorPosition location. + # Make sure the $Command is longer then the $CursorPosition before we truncate. + # This happens because the $Command does not include the last space. + if ($Command.Length -gt $CursorPosition) { + $Command=$Command.Substring(0,$CursorPosition) + } + __%[1]s_debug "Truncated command: $Command" + + $ShellCompDirectiveError=%[4]d + $ShellCompDirectiveNoSpace=%[5]d + $ShellCompDirectiveNoFileComp=%[6]d + $ShellCompDirectiveFilterFileExt=%[7]d + $ShellCompDirectiveFilterDirs=%[8]d + $ShellCompDirectiveKeepOrder=%[9]d + + # Prepare the command to request completions for the program. + # Split the command at the first space to separate the program and arguments. + $Program,$Arguments = $Command.Split(" ",2) + + $RequestComp="$Program %[3]s $Arguments" + __%[1]s_debug "RequestComp: $RequestComp" + + # we cannot use $WordToComplete because it + # has the wrong values if the cursor was moved + # so use the last argument + if ($WordToComplete -ne "" ) { + $WordToComplete = $Arguments.Split(" ")[-1] + } + __%[1]s_debug "New WordToComplete: $WordToComplete" + + + # Check for flag with equal sign + $IsEqualFlag = ($WordToComplete -Like "--*=*" ) + if ( $IsEqualFlag ) { + __%[1]s_debug "Completing equal sign flag" + # Remove the flag part + $Flag,$WordToComplete = $WordToComplete.Split("=",2) + } + + if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) { + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. 
+ __%[1]s_debug "Adding extra empty parameter" + # PowerShell 7.2+ changed the way how the arguments are passed to executables, + # so for pre-7.2 or when Legacy argument passing is enabled we need to use +`+" # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+` + if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or + ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or + (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and + $PSNativeCommandArgumentPassing -eq 'Legacy')) { +`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` + } else { + $RequestComp="$RequestComp" + ' ""' + } + } + + __%[1]s_debug "Calling $RequestComp" + # First disable ActiveHelp which is not supported for Powershell + $env:%[10]s=0 + + #call the command store the output in $out and redirect stderr and stdout to null + # $Out is an array contains each line per element + Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null + + # get directive from last line + [int]$Directive = $Out[-1].TrimStart(':') + if ($Directive -eq "") { + # There is no directive specified + $Directive = 0 + } + __%[1]s_debug "The completion directive is: $Directive" + + # remove directive (last element) from out + $Out = $Out | Where-Object { $_ -ne $Out[-1] } + __%[1]s_debug "The completions are: $Out" + + if (($Directive -band $ShellCompDirectiveError) -ne 0 ) { + # Error code. No completion. + __%[1]s_debug "Received error from custom completion go code" + return + } + + $Longest = 0 + [Array]$Values = $Out | ForEach-Object { + #Split the output in name and description +`+" $Name, $Description = $_.Split(\"`t\",2)"+` + __%[1]s_debug "Name: $Name Description: $Description" + + # Look for the longest completion so that we can format things nicely + if ($Longest -lt $Name.Length) { + $Longest = $Name.Length + } + + # Set the description to a one space string if there is none set. + # This is needed because the CompletionResult does not accept an empty string as argument + if (-Not $Description) { + $Description = " " + } + @{Name="$Name";Description="$Description"} + } + + + $Space = " " + if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) { + # remove the space here + __%[1]s_debug "ShellCompDirectiveNoSpace is called" + $Space = "" + } + + if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or + (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { + __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" + + # return here to prevent the completion of the extensions + return + } + + $Values = $Values | Where-Object { + # filter the result + $_.Name -like "$WordToComplete*" + + # Join the flag back if we have an equal sign flag + if ( $IsEqualFlag ) { + __%[1]s_debug "Join the equal sign flag back to the completion value" + $_.Name = $Flag + "=" + $_.Name + } + } + + # we sort the values in ascending order by name if keep order isn't passed + if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) { + $Values = $Values | Sort-Object -Property Name + } + + if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { + __%[1]s_debug "ShellCompDirectiveNoFileComp is called" + + if ($Values.Length -eq 0) { + # Just print an empty string here so the + # shell does not start to complete paths. + # We cannot use CompletionResult here because + # it does not accept an empty string as argument. 
+ "" + return + } + } + + # Get the current mode + $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function + __%[1]s_debug "Mode: $Mode" + + $Values | ForEach-Object { + + # store temporary because switch will overwrite $_ + $comp = $_ + + # PowerShell supports three different completion modes + # - TabCompleteNext (default windows style - on each key press the next option is displayed) + # - Complete (works like bash) + # - MenuComplete (works like zsh) + # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function + + # CompletionResult Arguments: + # 1) CompletionText text to be used as the auto completion result + # 2) ListItemText text to be displayed in the suggestion list + # 3) ResultType type of completion result + # 4) ToolTip text for the tooltip with details about the object + + switch ($Mode) { + + # bash like + "Complete" { + + if ($Values.Length -eq 1) { + __%[1]s_debug "Only one completion left" + + # insert space after value + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + } else { + # Add the proper number of spaces to align the descriptions + while($comp.Name.Length -lt $Longest) { + $comp.Name = $comp.Name + " " + } + + # Check for empty description and only add parentheses if needed + if ($($comp.Description) -eq " " ) { + $Description = "" + } else { + $Description = " ($($comp.Description))" + } + + [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") + } + } + + # zsh like + "MenuComplete" { + # insert space after value + # MenuComplete will automatically show the ToolTip of + # the highlighted value at the bottom of the suggestions. + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } + + # TabCompleteNext and in case we get something unknown + Default { + # Like MenuComplete but we don't want to add a space here because + # the user need to press space anyway to get the completion. + # Description will not be shown because that's not possible with TabCompleteNext + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } + } + + } +} + +Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock $__%[2]sCompleterBlock +`, name, nameForVar, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) +} + +func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genPowerShellComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.genPowerShellCompletion(outFile, includeDesc) +} + +// GenPowerShellCompletionFile generates powershell completion file without descriptions. 
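+// Use GenPowerShellCompletionFileWithDesc to include descriptions.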
+func (c *Command) GenPowerShellCompletionFile(filename string) error {
+	return c.genPowerShellCompletionFile(filename, false)
+}
+
+// GenPowerShellCompletion generates powershell completion file without descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletion(w io.Writer) error {
+	return c.genPowerShellCompletion(w, false)
+}
+
+// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions.
+func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error {
+	return c.genPowerShellCompletionFile(filename, true)
+}
+
+// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error {
+	return c.genPowerShellCompletion(w, true)
+}
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md
new file mode 100644
index 000000000..c449f1e5c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.md
@@ -0,0 +1,3 @@
+# Generating PowerShell Completions For Your Own cobra.Command
+
+Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details.
diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md
new file mode 100644
index 000000000..8a291eb20
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md
@@ -0,0 +1,64 @@
+## Projects using Cobra
+
+- [Allero](https://github.com/allero-io/allero)
+- [Arewefastyet](https://benchmark.vitess.io)
+- [Arduino CLI](https://github.com/arduino/arduino-cli)
+- [Bleve](https://blevesearch.com/)
+- [Cilium](https://cilium.io/)
+- [CloudQuery](https://github.com/cloudquery/cloudquery)
+- [CockroachDB](https://www.cockroachlabs.com/)
+- [Constellation](https://github.com/edgelesssys/constellation)
+- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk)
+- [Datree](https://github.com/datreeio/datree)
+- [Delve](https://github.com/derekparker/delve)
+- [Docker (distribution)](https://github.com/docker/distribution)
+- [Etcd](https://etcd.io/)
+- [Gardener](https://github.com/gardener/gardenctl)
+- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl)
+- [Git Bump](https://github.com/erdaltsksn/git-bump)
+- [GitHub CLI](https://github.com/cli/cli)
+- [GitHub Labeler](https://github.com/erdaltsksn/gh-label)
+- [Golangci-lint](https://golangci-lint.run)
+- [GopherJS](https://github.com/gopherjs/gopherjs)
+- [GoReleaser](https://goreleaser.com)
+- [Helm](https://helm.sh)
+- [Hugo](https://gohugo.io)
+- [Infracost](https://github.com/infracost/infracost)
+- [Istio](https://istio.io)
+- [Kool](https://github.com/kool-dev/kool)
+- [Kubernetes](https://kubernetes.io/)
+- [Kubescape](https://github.com/kubescape/kubescape)
+- [KubeVirt](https://github.com/kubevirt/kubevirt)
+- [Linkerd](https://linkerd.io/)
+- [Mattermost-server](https://github.com/mattermost/mattermost-server)
+- [Mercure](https://mercure.rocks/)
+- [Meroxa CLI](https://github.com/meroxa/cli)
+- [Metal Stack CLI](https://github.com/metal-stack/metalctl)
+- [Moby (former Docker)](https://github.com/moby/moby)
+- [Moldy](https://github.com/Moldy-Community/moldy)
+- [Multi-gitter](https://github.com/lindell/multi-gitter)
+- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+- [nFPM](https://nfpm.goreleaser.com)
+- [Okteto](https://github.com/okteto/okteto)
+- [OpenShift](https://www.openshift.com/)
+- [Ory Hydra](https://github.com/ory/hydra)
+- [Ory Kratos](https://github.com/ory/kratos)
+- [Pixie](https://github.com/pixie-io/pixie)
+- [Polygon Edge](https://github.com/0xPolygon/polygon-edge)
+- [Pouch](https://github.com/alibaba/pouch)
+- [ProjectAtomic (enterprise)](https://www.projectatomic.io/)
+- [Prototool](https://github.com/uber/prototool)
+- [Pulumi](https://www.pulumi.com)
+- [QRcp](https://github.com/claudiodangelis/qrcp)
+- [Random](https://github.com/erdaltsksn/random)
+- [Rclone](https://rclone.org/)
+- [Scaleway CLI](https://github.com/scaleway/scaleway-cli)
+- [Sia](https://github.com/SiaFoundation/siad)
+- [Skaffold](https://skaffold.dev/)
+- [Tendermint](https://github.com/tendermint/tendermint)
+- [Twitch CLI](https://github.com/twitchdev/twitch-cli)
+- [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli)
+- [Vitess](https://vitess.io)
+- VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework)
+- [Werf](https://werf.io/)
+- [ZITADEL](https://github.com/zitadel/zitadel)
diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go
new file mode 100644
index 000000000..b035742d3
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/shell_completions.go
@@ -0,0 +1,98 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+	"github.com/spf13/pflag"
+)
+
+// MarkFlagRequired instructs the various shell completion implementations to
+// prioritize the named flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkFlagRequired(name string) error {
+	return MarkFlagRequired(c.Flags(), name)
+}
+
+// MarkPersistentFlagRequired instructs the various shell completion implementations to
+// prioritize the named persistent flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+	return MarkFlagRequired(c.PersistentFlags(), name)
+}
+
+// MarkFlagRequired instructs the various shell completion implementations to
+// prioritize the named flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
+	return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for the named flag to the specified file extensions.
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+	return MarkFlagFilename(c.Flags(), name, extensions...)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// The bash completion script will call the bash function f for the flag.
+//
+// This will only work for bash completion.
+// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
+// to register a Go function which will work across all shells.
+func (c *Command) MarkFlagCustom(name string, f string) error {
+	return MarkFlagCustom(c.Flags(), name, f)
+}
+
+// MarkPersistentFlagFilename instructs the various shell completion
+// implementations to limit completions for the named persistent flag to the
+// specified file extensions.
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+	return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for the named flag to the specified file extensions.
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
+	return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// The bash completion script will call the bash function f for the flag.
+//
+// This will only work for bash completion.
+// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
+// to register a Go function which will work across all shells.
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
+	return flags.SetAnnotation(name, BashCompCustom, []string{f})
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// limit completions for the named flag to directory names.
+func (c *Command) MarkFlagDirname(name string) error {
+	return MarkFlagDirname(c.Flags(), name)
+}
+
+// MarkPersistentFlagDirname instructs the various shell completion
+// implementations to limit completions for the named persistent flag to
+// directory names.
+func (c *Command) MarkPersistentFlagDirname(name string) error {
+	return MarkFlagDirname(c.PersistentFlags(), name)
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// limit completions for the named flag to directory names.
+func MarkFlagDirname(flags *pflag.FlagSet, name string) error {
+	return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{})
+}
diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md
new file mode 100644
index 000000000..065c0621d
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/shell_completions.md
@@ -0,0 +1,576 @@
+# Generating shell completions
+
+Cobra can generate shell completions for multiple shells.
+The currently supported shells are:
+- Bash
+- Zsh
+- fish
+- PowerShell
+
+Cobra will automatically provide your program with a fully functional `completion` command,
+similarly to how it provides the `help` command.
+
+## Creating your own completion command
+
+If you do not wish to use the default `completion` command, you can choose to
+provide your own, which will take precedence over the default one. (This also provides
+backwards-compatibility with programs that already have their own `completion` command.)
+
+If you are using the `cobra-cli` generator,
+which can be found at [spf13/cobra-cli](https://github.com/spf13/cobra-cli),
+you can create a completion command by running
+
+```bash
+cobra-cli add completion
+```
+and then modifying the generated `cmd/completion.go` file to look something like this
+(writing the shell script to stdout allows the most flexible use):
+
+```go
+var completionCmd = &cobra.Command{
+	Use:   "completion [bash|zsh|fish|powershell]",
+	Short: "Generate completion script",
+	Long: fmt.Sprintf(`To load completions:
+
+Bash:
+
+  $ source <(%[1]s completion bash)
+
+  # To load completions for each session, execute once:
+  # Linux:
+  $ %[1]s completion bash > /etc/bash_completion.d/%[1]s
+  # macOS:
+  $ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
+
+Zsh:
+
+  # If shell completion is not already enabled in your environment,
+  # you will need to enable it. You can execute the following once:
+
+  $ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+  # To load completions for each session, execute once:
+  $ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
+
+  # You will need to start a new shell for this setup to take effect.
+
+fish:
+
+  $ %[1]s completion fish | source
+
+  # To load completions for each session, execute once:
+  $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
+
+PowerShell:
+
+  PS> %[1]s completion powershell | Out-String | Invoke-Expression
+
+  # To load completions for every new session, run:
+  PS> %[1]s completion powershell > %[1]s.ps1
+  # and source this file from your PowerShell profile.
+`, cmd.Root().Name()),
+	DisableFlagsInUseLine: true,
+	ValidArgs:             []string{"bash", "zsh", "fish", "powershell"},
+	Args:                  cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
+	Run: func(cmd *cobra.Command, args []string) {
+		switch args[0] {
+		case "bash":
+			cmd.Root().GenBashCompletion(os.Stdout)
+		case "zsh":
+			cmd.Root().GenZshCompletion(os.Stdout)
+		case "fish":
+			cmd.Root().GenFishCompletion(os.Stdout, true)
+		case "powershell":
+			cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
+		}
+	},
+}
+```
+
+**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script and must therefore be removed.
+
+## Adapting the default completion command
+
+Cobra provides a few options for the default `completion` command. To configure such options you must set
+the `CompletionOptions` field on the *root* command.
+
+To tell Cobra *not* to provide the default `completion` command:
+```
+rootCmd.CompletionOptions.DisableDefaultCmd = true
+```
+
+To tell Cobra to mark the default `completion` command as *hidden*:
+```
+rootCmd.CompletionOptions.HiddenDefaultCmd = true
+```
+
+To tell Cobra *not* to provide the user with the `--no-descriptions` flag to the completion sub-commands:
+```
+rootCmd.CompletionOptions.DisableNoDescFlag = true
+```
+
+To tell Cobra to completely disable descriptions for completions:
+```
+rootCmd.CompletionOptions.DisableDescriptions = true
+```
+
+# Customizing completions
+
+The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values.
+
+## Completion of nouns
+
+### Static completion of nouns
+
+Cobra allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field.
+For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them.
+Some simplified code from `kubectl get` looks like:
+
+```go
+validArgs := []string{"pod", "node", "service", "replicationcontroller"}
+
+cmd := &cobra.Command{
+	Use:     "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
+	Short:   "Display one or many resources",
+	Long:    get_long,
+	Example: get_example,
+	Run: func(cmd *cobra.Command, args []string) {
+		cobra.CheckErr(RunGet(f, out, cmd, args))
+	},
+	ValidArgs: validArgs,
+}
+```
+
+Notice we put the `ValidArgs` field on the `get` sub-command. Doing so will give results like:
+
+```bash
+$ kubectl get [tab][tab]
+node   pod   replicationcontroller   service
+```
+
+#### Aliases for nouns
+
+If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases := []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"}
+
+cmd := &cobra.Command{
+	...
+	ValidArgs:  validArgs,
+	ArgAliases: argAliases,
+}
+```
+
+The aliases are shown to the user on tab completion only if no completions were found within sub-commands or `ValidArgs`.
+
+### Dynamic completion of nouns
+
+In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution-time. In a similar fashion to static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both.
+Simplified code from `helm status` looks like:
+
+```go
+cmd := &cobra.Command{
+	Use:   "status RELEASE_NAME",
+	Short: "Display the status of the named release",
+	Long:  status_long,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return RunGet(args[0])
+	},
+	ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+		if len(args) != 0 {
+			return nil, cobra.ShellCompDirectiveNoFileComp
+		}
+		return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp
+	},
+}
+```
+Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster.
+Notice we put the `ValidArgsFunction` on the `status` sub-command. Assuming the Helm releases on the cluster are `harbor`, `notary`, `rook` and `thanos`, this dynamic completion will give results like:
+
+```bash
+$ helm status [tab][tab]
+harbor notary rook thanos
+```
+You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields that let you control certain shell completion behaviors for your particular completion. You can combine them with the bit-or operator, such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp`:
+```go
+// Indicates that the shell will perform its default behavior after completions
+// have been provided (this implies none of the other directives).
+ShellCompDirectiveDefault
+
+// Indicates an error occurred and completions should be ignored.
+ShellCompDirectiveError
+
+// Indicates that the shell should not add a space after the completion,
+// even if there is a single completion provided.
+ShellCompDirectiveNoSpace
+
+// Indicates that the shell should not provide file completion even when
+// no completion is provided.
+ShellCompDirectiveNoFileComp
+
+// Indicates that the returned completions should be used as file extension filters.
+// For example, to complete only files of the form *.json or *.yaml:
+//    return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt
+// For flags, using MarkFlagFilename() and MarkPersistentFlagFilename()
+// is a shortcut to using this directive explicitly.
+//
+ShellCompDirectiveFilterFileExt
+
+// Indicates that only directory names should be provided in file completion.
+// For example:
+//    return nil, ShellCompDirectiveFilterDirs
+// For flags, using MarkFlagDirname() is a shortcut to using this directive explicitly.
+//
+// To request directory names within another directory, the returned completions
+// should specify a single directory name within which to search. For example,
+// to complete directories within "themes/":
+//    return []string{"themes"}, ShellCompDirectiveFilterDirs
+//
+ShellCompDirectiveFilterDirs
+
+// ShellCompDirectiveKeepOrder indicates that the shell should preserve the order
+// in which the completions are provided
+ShellCompDirectiveKeepOrder
+```
+
+***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function.
+
+#### Debugging
+
+Cobra achieves dynamic completion through the use of a hidden command called by the completion script. To debug your Go completion code, you can call this hidden command directly:
+```bash
+$ helm __complete status har<ENTER>
+harbor
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+***Important:*** If the noun to complete is empty (when the user has not yet typed any letters of that noun), you must pass an empty parameter to the `__complete` command:
+```bash
+$ helm __complete status ""<ENTER>
+harbor
+notary
+rook
+thanos
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code:
+```go
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and optionally prints to stderr.
+cobra.CompDebug(msg string, printToStdErr bool)
+cobra.CompDebugln(msg string, printToStdErr bool)
+
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and to stderr.
+cobra.CompError(msg string)
+cobra.CompErrorln(msg string)
+```
+***Important:*** You should **not** leave traces that print directly to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging trace functions mentioned above.
+
+## Completions for flags
+
+### Mark flags as required
+
+Most of the time completions will only show sub-commands. But if a flag is required to make a sub-command work, you probably want it to show up when the user types [tab][tab].
You can mark a flag as 'Required' like so:
+
+```go
+cmd.MarkFlagRequired("pod")
+cmd.MarkFlagRequired("container")
+```
+
+and you'll get something like
+
+```bash
+$ kubectl exec [tab][tab]
+-c            --container=  -p            --pod=
+```
+
+### Specify dynamic flag completion
+
+As for nouns, Cobra provides a way of defining dynamic completion of flags. To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function using the `command.RegisterFlagCompletionFunc()` function.
+
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault
+})
+```
+Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so:
+
+```bash
+$ helm status --output [tab][tab]
+json table yaml
+```
+
+#### Debugging
+
+You can also easily debug your Go completion code for flags:
+```bash
+$ helm __complete status --output ""
+json
+table
+yaml
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging trace functions mentioned further above.
+
+### Specify valid filename extensions for flags that take a filename
+
+To limit completions of flag values to file names with certain extensions you can either use the different `MarkFlagFilename()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterFileExt`, like so:
+```go
+flagName := "output"
+cmd.MarkFlagFilename(flagName, "yaml", "json")
+```
+or
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	return []string{"yaml", "json"}, cobra.ShellCompDirectiveFilterFileExt
+})
+```
+
+### Limit flag completions to directory names
+
+To limit completions of flag values to directory names you can either use the `MarkFlagDirname()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs`, like so:
+```go
+flagName := "output"
+cmd.MarkFlagDirname(flagName)
+```
+or
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	return nil, cobra.ShellCompDirectiveFilterDirs
+})
+```
+To limit completions of flag values to directory names *within another directory* you can use a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs` like so:
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	return []string{"themes"}, cobra.ShellCompDirectiveFilterDirs
+})
+```
+### Descriptions for completions
+
+Cobra provides support for completion descriptions. Such descriptions are supported for each shell
+(however, for bash, it is only available in the [completion V2 version](#bash-completion-v2)).
+For commands and flags, Cobra will provide the descriptions automatically, based on usage information.
+For example, using zsh:
+```
+$ helm s[tab]
+search  -- search for a keyword in charts
+show    -- show information of a chart
+status  -- displays the status of the named release
+```
+while using fish:
+```
+$ helm s[tab]
+search  (search for a keyword in charts)  show  (show information of a chart)  status  (displays the status of the named release)
+```
+
+Cobra allows you to add descriptions to your own completions. Simply add the description text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example:
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp
+}}
+```
+or
+```go
+ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"}
+```
+
+If you don't want to show descriptions in the completions, you can add `--no-descriptions` to the default `completion` command to disable them, like:
+
+```bash
+$ source <(helm completion bash)
+$ helm completion [tab][tab]
+bash        (generate autocompletion script for bash)        powershell  (generate autocompletion script for powershell)
+fish        (generate autocompletion script for fish)        zsh         (generate autocompletion script for zsh)
+
+$ source <(helm completion bash --no-descriptions)
+$ helm completion [tab][tab]
+bash  fish  powershell  zsh
+```
+
+## Bash completions
+
+### Dependencies
+
+The bash completion script generated by Cobra requires the `bash_completion` package. You should update the help text of your completion command to show how to install the `bash_completion` package ([Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion)).
+
+### Aliases
+
+You can also configure `bash` aliases for your program and they will also support completions.
+
+```bash
+alias aliasname=origcommand
+complete -o default -F __start_origcommand aliasname
+
+# and now when you run `aliasname` completion will make
+# suggestions as it did for `origcommand`.
+
+$ aliasname <tab><tab>
+completion  firstcommand  secondcommand
+```
+
+### Bash legacy dynamic completions
+
+For backward compatibility, Cobra still supports its bash legacy dynamic completion solution.
+Please refer to [Bash Completions](bash_completions.md) for details.
+
+### Bash completion V2
+
+Cobra provides two versions for bash completion. The original bash completion (which started it all!) can be used by calling
+`GenBashCompletion()` or `GenBashCompletionFile()`.
+
+A new V2 bash completion version is also available. This version can be used by calling `GenBashCompletionV2()` or
+`GenBashCompletionFileV2()`. The V2 version does **not** support the legacy dynamic completion
+(see [Bash Completions](bash_completions.md)) but instead works only with the Go dynamic completion
+solution described in this document.
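+
+For illustration, a minimal sketch (assuming a root command named `rootCmd` for a hypothetical program `myprog`) that writes the V2 script, with descriptions, to stdout:
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	// Placeholder root command; a real program would already have its
+	// sub-commands and flags attached.
+	rootCmd := &cobra.Command{Use: "myprog"}
+
+	// Write the V2 bash completion script to stdout; the second
+	// argument requests completion descriptions.
+	if err := rootCmd.GenBashCompletionV2(os.Stdout, true); err != nil {
+		os.Exit(1)
+	}
+}
+```
+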
+Unless your program already uses the legacy dynamic completion solution, it is recommended that you use the bash
+completion V2 solution which provides the following extra features:
+- Supports completion descriptions (like the other shells)
+- Small completion script of less than 300 lines (v1 generates scripts of thousands of lines; `kubectl` for example has a bash v1 completion script of over 13K lines)
+- Streamlined user experience thanks to a completion behavior aligned with the other shells
+
+`Bash` completion V2 supports descriptions for completions. When calling `GenBashCompletionV2()` or `GenBashCompletionFileV2()`
+you must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra
+will provide the description automatically based on usage information. You can choose to make this option configurable by
+your users.
+
+```
+# With descriptions
+$ helm s[tab][tab]
+search  (search for a keyword in charts)           status  (display the status of the named release)
+show    (show information of a chart)
+
+# Without descriptions
+$ helm s[tab][tab]
+search  show  status
+```
+**Note**: Cobra's default `completion` command uses bash completion V2. If for some reason you need to use bash completion V1, you will need to implement your own `completion` command.
+
+## Zsh completions
+
+Cobra supports native zsh completion generated from the root `cobra.Command`.
+The generated completion script should be put somewhere in your `$fpath` and be named
+`_<programName>`. You will need to start a new shell for the completions to become available.
+
+Zsh supports descriptions for completions. Cobra will provide the description automatically,
+based on usage information. Cobra provides a way to completely disable such descriptions by
+using `GenZshCompletionNoDesc()` or `GenZshCompletionFileNoDesc()`. You can choose to make
+this a configurable option to your users.
+```
+# With descriptions
+$ helm s[tab]
+search  -- search for a keyword in charts
+show    -- show information of a chart
+status  -- displays the status of the named release
+
+# Without descriptions
+$ helm s[tab]
+search  show  status
+```
+*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
+
+### Limitations
+
+* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation).
+  * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`.
+  * You should instead use `RegisterFlagCompletionFunc()`.
+
+### Zsh completions standardization
+
+Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced.
+Please refer to [Zsh Completions](zsh_completions.md) for details.
+
+## fish completions
+
+Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information.
+```
+# With descriptions
+$ helm s[tab]
+search  (search for a keyword in charts)  show  (show information of a chart)  status  (displays the status of the named release)
+
+# Without descriptions
+$ helm s[tab]
+search  show  status
+```
+*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
+
+### Limitations
+
+* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation).
+  * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`.
+  * You should instead use `RegisterFlagCompletionFunc()`.
+* The following flag completion annotations are not supported and will be ignored for `fish`:
+  * `BashCompFilenameExt` (filtering by file extension)
+  * `BashCompSubdirsInDir` (filtering by directory)
+* The functions corresponding to the above annotations are consequently not supported and will be ignored for `fish`:
+  * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension)
+  * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory)
+* Similarly, the following completion directives are not supported and will be ignored for `fish`:
+  * `ShellCompDirectiveFilterFileExt` (filtering by file extension)
+  * `ShellCompDirectiveFilterDirs` (filtering by directory)
+
+## PowerShell completions
+
+Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
+
+The script is designed to support all three PowerShell completion modes:
+
+* TabCompleteNext (default Windows style - on each key press the next option is displayed)
+* Complete (works like bash)
+* MenuComplete (works like zsh)
+
+You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function <mode>`. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode.
+
+Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` automatic variable. See `Get-Help about_Profiles` for more info about PowerShell profiles.
+
+```
+# With descriptions and Mode 'Complete'
+$ helm s[tab]
+search  (search for a keyword in charts)  show  (show information of a chart)  status  (displays the status of the named release)
+
+# With descriptions and Mode 'MenuComplete' The description of the current selected value will be displayed below the suggestions.
+$ helm s[tab]
+search    show    status
+
+search for a keyword in charts
+
+# Without descriptions
+$ helm s[tab]
+search  show  status
+```
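+
+For illustration, a minimal sketch; `myprog` and the output file name are placeholders. Note that for PowerShell the descriptions choice is made by picking the `...WithDesc()` variant rather than by passing a boolean:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	rootCmd := &cobra.Command{Use: "myprog"}
+	// Use GenPowerShellCompletionFile() instead to omit descriptions.
+	if err := rootCmd.GenPowerShellCompletionFileWithDesc("myprog.ps1"); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+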
+### Aliases
+
+You can also configure `powershell` aliases for your program and they will also support completions.
+
+```
+$ sal aliasname origcommand
+$ Register-ArgumentCompleter -CommandName 'aliasname' -ScriptBlock $__origcommandCompleterBlock
+
+# and now when you run `aliasname` completion will make
+# suggestions as it did for `origcommand`.
+
+$ aliasname <tab>
+completion  firstcommand  secondcommand
+```
+The name of the completer block variable is of the form `$__<programName>CompleterBlock` where every `-` and `:` in the program name have been replaced with `_`, to respect PowerShell naming syntax.
+
+### Limitations
+
+* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation).
+  * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`.
+  * You should instead use `RegisterFlagCompletionFunc()`.
+* The following flag completion annotations are not supported and will be ignored for `powershell`:
+  * `BashCompFilenameExt` (filtering by file extension)
+  * `BashCompSubdirsInDir` (filtering by directory)
+* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`:
+  * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension)
+  * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory)
+* Similarly, the following completion directives are not supported and will be ignored for `powershell`:
+  * `ShellCompDirectiveFilterFileExt` (filtering by file extension)
+  * `ShellCompDirectiveFilterDirs` (filtering by directory)
diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md
new file mode 100644
index 000000000..85201d840
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/user_guide.md
@@ -0,0 +1,726 @@
+# User Guide
+
+While you are welcome to provide your own organization, typically a Cobra-based
+application will follow the organizational structure below:
+
+```
+  ▾ appName/
+    ▾ cmd/
+        add.go
+        your.go
+        commands.go
+        here.go
+      main.go
+```
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
+
+```go
+package main
+
+import (
+  "{pathToYourApp}/cmd"
+)
+
+func main() {
+  cmd.Execute()
+}
+```
+
+## Using the Cobra Generator
+
+Cobra-CLI is its own program that will create your application and add any
+commands you want. It's the easiest way to incorporate Cobra into your application.
+
+For complete details on using the Cobra generator, please refer to [The Cobra-CLI Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md)
+
+## Using the Cobra Library
+
+To manually implement Cobra you need to create a bare main.go file and a rootCmd file.
+You will optionally provide additional commands as you see fit.
+
+### Create rootCmd
+
+Cobra doesn't require any special constructors. Simply create your commands.
+
+Ideally you place this in app/cmd/root.go:
+
+```go
+var rootCmd = &cobra.Command{
+  Use:   "hugo",
+  Short: "Hugo is a very fast static site generator",
+  Long: `A Fast and Flexible Static Site Generator built with
+                love by spf13 and friends in Go.
+ Complete documentation is available at https://gohugo.io/documentation/`, + Run: func(cmd *cobra.Command, args []string) { + // Do Stuff Here + }, +} + +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} +``` + +You will additionally define flags and handle configuration in your init() function. + +For example cmd/root.go: + +```go +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + // Used for flags. + cfgFile string + userLicense string + + rootCmd = &cobra.Command{ + Use: "cobra-cli", + Short: "A generator for Cobra based Applications", + Long: `Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.`, + } +) + +// Execute executes the root command. +func Execute() error { + return rootCmd.Execute() +} + +func init() { + cobra.OnInitialize(initConfig) + + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") + rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") + rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) + viper.SetDefault("author", "NAME HERE ") + viper.SetDefault("license", "apache") + + rootCmd.AddCommand(addCmd) + rootCmd.AddCommand(initCmd) +} + +func initConfig() { + if cfgFile != "" { + // Use config file from the flag. + viper.SetConfigFile(cfgFile) + } else { + // Find home directory. + home, err := os.UserHomeDir() + cobra.CheckErr(err) + + // Search config in home directory with name ".cobra" (without extension). + viper.AddConfigPath(home) + viper.SetConfigType("yaml") + viper.SetConfigName(".cobra") + } + + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err == nil { + fmt.Println("Using config file:", viper.ConfigFileUsed()) + } +} +``` + +### Create your main.go + +With the root command you need to have your main function execute it. +Execute should be run on the root for clarity, though it can be called on any command. + +In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. + +```go +package main + +import ( + "{pathToYourApp}/cmd" +) + +func main() { + cmd.Execute() +} +``` + +### Create additional commands + +Additional commands can be defined and typically are each given their own file +inside of the cmd/ directory. + +If you wanted to create a version command you would create cmd/version.go and +populate it with the following: + +```go +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(versionCmd) +} + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Hugo", + Long: `All software has versions. This is Hugo's`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") + }, +} +``` + +### Organizing subcommands + +A command may have subcommands which in turn may have other subcommands. This is achieved by using +`AddCommand`. In some cases, especially in larger applications, each subcommand may be defined in +its own go package. 
+ +The suggested approach is for the parent command to use `AddCommand` to add its most immediate +subcommands. For example, consider the following directory structure: + +```text +├── cmd +│   ├── root.go +│   └── sub1 +│   ├── sub1.go +│   └── sub2 +│   ├── leafA.go +│   ├── leafB.go +│   └── sub2.go +└── main.go +``` + +In this case: + +* The `init` function of `root.go` adds the command defined in `sub1.go` to the root command. +* The `init` function of `sub1.go` adds the command defined in `sub2.go` to the sub1 command. +* The `init` function of `sub2.go` adds the commands defined in `leafA.go` and `leafB.go` to the + sub2 command. + +This approach ensures the subcommands are always included at compile time while avoiding cyclic +references. + +### Returning and handling errors + +If you wish to return an error to the caller of a command, `RunE` can be used. + +```go +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(tryCmd) +} + +var tryCmd = &cobra.Command{ + Use: "try", + Short: "Try and possibly fail at something", + RunE: func(cmd *cobra.Command, args []string) error { + if err := someFunc(); err != nil { + return err + } + return nil + }, +} +``` + +The error can then be caught at the execute function call. + +## Working with Flags + +Flags provide modifiers to control how the action command operates. + +### Assign flags to a command + +Since the flags are defined and used in different locations, we need to +define a variable outside with the correct scope to assign the flag to +work with. + +```go +var Verbose bool +var Source string +``` + +There are two different approaches to assign a flag. + +### Persistent Flags + +A flag can be 'persistent', meaning that this flag will be available to the +command it's assigned to as well as every command under that command. For +global flags, assign a flag as a persistent flag on the root. + +```go +rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") +``` + +### Local Flags + +A flag can also be assigned locally, which will only apply to that specific command. + +```go +localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") +``` + +### Local Flag on Parent Commands + +By default, Cobra only parses local flags on the target command, and any local flags on +parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will +parse local flags on each command before executing the target command. + +```go +command := cobra.Command{ + Use: "print [OPTIONS] [COMMANDS]", + TraverseChildren: true, +} +``` + +### Bind Flags with Config + +You can also bind your flags with [viper](https://github.com/spf13/viper): +```go +var author string + +func init() { + rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) +} +``` + +In this example, the persistent flag `author` is bound with `viper`. +**Note**: the variable `author` will not be set to the value from config, +when the `--author` flag is provided by user. + +More in [viper documentation](https://github.com/spf13/viper#working-with-flags). + +### Required flags + +Flags are optional by default. 
If instead you wish your command to report an error +when a flag has not been set, mark it as required: +```go +rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") +rootCmd.MarkFlagRequired("region") +``` + +Or, for persistent flags: +```go +rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") +rootCmd.MarkPersistentFlagRequired("region") +``` + +### Flag Groups + +If you have different flags that must be provided together (e.g. if they provide the `--username` flag they MUST provide the `--password` flag as well) then +Cobra can enforce that requirement: +```go +rootCmd.Flags().StringVarP(&u, "username", "u", "", "Username (required if password is set)") +rootCmd.Flags().StringVarP(&pw, "password", "p", "", "Password (required if username is set)") +rootCmd.MarkFlagsRequiredTogether("username", "password") +``` + +You can also prevent different flags from being provided together if they represent mutually +exclusive options such as specifying an output format as either `--json` or `--yaml` but never both: +```go +rootCmd.Flags().BoolVar(&ofJson, "json", false, "Output in JSON") +rootCmd.Flags().BoolVar(&ofYaml, "yaml", false, "Output in YAML") +rootCmd.MarkFlagsMutuallyExclusive("json", "yaml") +``` + +In both of these cases: + - both local and persistent flags can be used + - **NOTE:** the group is only enforced on commands where every flag is defined + - a flag may appear in multiple groups + - a group may contain any number of flags + +## Positional and Custom Arguments + +Validation of positional arguments can be specified using the `Args` field of `Command`. +The following validators are built in: + +- Number of arguments: + - `NoArgs` - report an error if there are any positional args. + - `ArbitraryArgs` - accept any number of args. + - `MinimumNArgs(int)` - report an error if less than N positional args are provided. + - `MaximumNArgs(int)` - report an error if more than N positional args are provided. + - `ExactArgs(int)` - report an error if there are not exactly N positional args. + - `RangeArgs(min, max)` - report an error if the number of args is not between `min` and `max`. +- Content of the arguments: + - `OnlyValidArgs` - report an error if there are any positional args not specified in the `ValidArgs` field of `Command`, which can optionally be set to a list of valid values for positional args. + +If `Args` is undefined or `nil`, it defaults to `ArbitraryArgs`. + +Moreover, `MatchAll(pargs ...PositionalArgs)` enables combining existing checks with arbitrary other checks. +For instance, if you want to report an error if there are not exactly N positional args OR if there are any positional +args that are not in the `ValidArgs` field of `Command`, you can call `MatchAll` on `ExactArgs` and `OnlyValidArgs`, as +shown below: + +```go +var cmd = &cobra.Command{ + Short: "hello", + Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hello, World!") + }, +} +``` + +It is possible to set any custom validator that satisfies `func(cmd *cobra.Command, args []string) error`. 
+For example: + +```go +var cmd = &cobra.Command{ + Short: "hello", + Args: func(cmd *cobra.Command, args []string) error { + // Optionally run one of the validators provided by cobra + if err := cobra.MinimumNArgs(1)(cmd, args); err != nil { + return err + } + // Run the custom validation logic + if myapp.IsValidColor(args[0]) { + return nil + } + return fmt.Errorf("invalid color specified: %s", args[0]) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hello, World!") + }, +} +``` + +## Example + +In the example below, we have defined three commands. Two are at the top level +and one (cmdTimes) is a child of one of the top commands. In this case the root +is not executable, meaning that a subcommand is required. This is accomplished +by not providing a 'Run' for the 'rootCmd'. + +We have only defined one flag for a single command. + +More documentation about flags is available at https://github.com/spf13/pflag + +```go +package main + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +func main() { + var echoTimes int + + var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `print is for printing anything back to the screen. +For many years people have printed back to the screen.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Short: "Echo anything to the screen", + Long: `echo is for echoing anything back. +Echo works a lot like print, except it has a child command.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Echo: " + strings.Join(args, " ")) + }, + } + + var cmdTimes = &cobra.Command{ + Use: "times [string to echo]", + Short: "Echo anything to the screen more times", + Long: `echo things multiple times back to the user by providing +a count and a string.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + for i := 0; i < echoTimes; i++ { + fmt.Println("Echo: " + strings.Join(args, " ")) + } + }, + } + + cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") + + var rootCmd = &cobra.Command{Use: "app"} + rootCmd.AddCommand(cmdPrint, cmdEcho) + cmdEcho.AddCommand(cmdTimes) + rootCmd.Execute() +} +``` + +For a more complete example of a larger application, please checkout [Hugo](https://gohugo.io/). + +## Help Command + +Cobra automatically adds a help command to your application when you have subcommands. +This will be called when a user runs 'app help'. Additionally, help will also +support all other commands as input. Say, for instance, you have a command called +'create' without any additional configuration; Cobra will work when 'app help +create' is called. Every command will automatically have the '--help' flag added. + +### Example + +The following output is automatically generated by Cobra. Nothing beyond the +command and flag definitions are needed. + + $ cobra-cli help + + Cobra is a CLI library for Go that empowers applications. + This application is a tool to generate the needed files + to quickly create a Cobra application. 
+ + Usage: + cobra-cli [command] + + Available Commands: + add Add a command to a Cobra Application + completion Generate the autocompletion script for the specified shell + help Help about any command + init Initialize a Cobra Application + + Flags: + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra-cli + -l, --license string name of license for the project + --viper use Viper for configuration + + Use "cobra-cli [command] --help" for more information about a command. + + +Help is just a command like any other. There is no special logic or behavior +around it. In fact, you can provide your own if you want. + +### Grouping commands in help + +Cobra supports grouping of available commands in the help output. To group commands, each group must be explicitly +defined using `AddGroup()` on the parent command. Then a subcommand can be added to a group using the `GroupID` element +of that subcommand. The groups will appear in the help output in the same order as they are defined using different +calls to `AddGroup()`. If you use the generated `help` or `completion` commands, you can set their group ids using +`SetHelpCommandGroupId()` and `SetCompletionCommandGroupId()` on the root command, respectively. + +### Defining your own help + +You can provide your own Help command or your own template for the default command to use +with the following functions: + +```go +cmd.SetHelpCommand(cmd *Command) +cmd.SetHelpFunc(f func(*Command, []string)) +cmd.SetHelpTemplate(s string) +``` + +The latter two will also apply to any children commands. + +## Usage Message + +When the user provides an invalid flag or invalid command, Cobra responds by +showing the user the 'usage'. + +### Example +You may recognize this from the help above. That's because the default help +embeds the usage as part of its output. + + $ cobra-cli --invalid + Error: unknown flag: --invalid + Usage: + cobra-cli [command] + + Available Commands: + add Add a command to a Cobra Application + completion Generate the autocompletion script for the specified shell + help Help about any command + init Initialize a Cobra Application + + Flags: + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra-cli + -l, --license string name of license for the project + --viper use Viper for configuration + + Use "cobra [command] --help" for more information about a command. + +### Defining your own usage +You can provide your own usage function or template for Cobra to use. +Like help, the function and template are overridable through public methods: + +```go +cmd.SetUsageFunc(f func(*Command) error) +cmd.SetUsageTemplate(s string) +``` + +## Version Flag + +Cobra adds a top-level '--version' flag if the Version field is set on the root command. +Running an application with the '--version' flag will print the version to stdout using +the version template. The template can be customized using the +`cmd.SetVersionTemplate(s string)` function. + +## PreRun and PostRun Hooks + +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. 
These functions are run in the following order: + +- `PersistentPreRun` +- `PreRun` +- `Run` +- `PostRun` +- `PersistentPostRun` + +An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: + +```go +package main + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func main() { + + var rootCmd = &cobra.Command{ + Use: "root [sub]", + Short: "My root command", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) + }, + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) + }, + } + + var subCmd = &cobra.Command{ + Use: "sub [no options!]", + Short: "My subcommand", + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) + }, + } + + rootCmd.AddCommand(subCmd) + + rootCmd.SetArgs([]string{""}) + rootCmd.Execute() + fmt.Println() + rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) + rootCmd.Execute() +} +``` + +Output: +``` +Inside rootCmd PersistentPreRun with args: [] +Inside rootCmd PreRun with args: [] +Inside rootCmd Run with args: [] +Inside rootCmd PostRun with args: [] +Inside rootCmd PersistentPostRun with args: [] + +Inside rootCmd PersistentPreRun with args: [arg1 arg2] +Inside subCmd PreRun with args: [arg1 arg2] +Inside subCmd Run with args: [arg1 arg2] +Inside subCmd PostRun with args: [arg1 arg2] +Inside subCmd PersistentPostRun with args: [arg1 arg2] +``` + +## Suggestions when "unknown command" happens + +Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: + +``` +$ hugo srever +Error: unknown command "srever" for "hugo" + +Did you mean this? + server + +Run 'hugo --help' for usage. +``` + +Suggestions are automatically generated based on existing subcommands and use an implementation of [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. + +If you need to disable suggestions or tweak the string distance in your command, use: + +```go +command.DisableSuggestions = true +``` + +or + +```go +command.SuggestionsMinimumDistance = 1 +``` + +You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but make sense in your set of commands but for which +you don't want aliases. 
Example: + +``` +$ kubectl remove +Error: unknown command "remove" for "kubectl" + +Did you mean this? + delete + +Run 'kubectl help' for usage. +``` + +## Generating documentation for your command + +Cobra can generate documentation based on subcommands, flags, etc. Read more about it in the [docs generation documentation](doc/README.md). + +## Generating shell completions + +Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). + +## Providing Active Help + +Cobra makes use of the shell-completion system to define a framework allowing you to provide Active Help to your users. Active Help are messages (hints, warnings, etc) printed as the program is being used. Read more about it in [Active Help](active_help.md). diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go new file mode 100644 index 000000000..1856e4c7f --- /dev/null +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -0,0 +1,308 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// GenZshCompletionFile generates zsh completion file including descriptions. +func (c *Command) GenZshCompletionFile(filename string) error { + return c.genZshCompletionFile(filename, true) +} + +// GenZshCompletion generates zsh completion file including descriptions +// and writes it to the passed writer. +func (c *Command) GenZshCompletion(w io.Writer) error { + return c.genZshCompletion(w, true) +} + +// GenZshCompletionFileNoDesc generates zsh completion file without descriptions. +func (c *Command) GenZshCompletionFileNoDesc(filename string) error { + return c.genZshCompletionFile(filename, false) +} + +// GenZshCompletionNoDesc generates zsh completion file without descriptions +// and writes it to the passed writer. +func (c *Command) GenZshCompletionNoDesc(w io.Writer) error { + return c.genZshCompletion(w, false) +} + +// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was +// not consistent with Bash completion. It has therefore been disabled. +// Instead, when no other completion is specified, file completion is done by +// default for every argument. One can disable file completion on a per-argument +// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp. +// To achieve file extension filtering, one can use ValidArgsFunction and +// ShellCompDirectiveFilterFileExt. +// +// Deprecated +func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error { + return nil +} + +// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore +// been disabled. 
+// To achieve the same behavior across all shells, one can use +// ValidArgs (for the first argument only) or ValidArgsFunction for +// any argument (can include the first one also). +// +// Deprecated +func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error { + return nil +} + +func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.genZshCompletion(outFile, includeDesc) +} + +func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genZshComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +func genZshComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s +compdef _%[1]s %[1]s + +# zsh completion for %-36[1]s -*- shell-script -*- + +__%[1]s_debug() +{ + local file="$BASH_COMP_DEBUG_FILE" + if [[ -n ${file} ]]; then + echo "$*" >> "${file}" + fi +} + +_%[1]s() +{ + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d + local shellCompDirectiveFilterFileExt=%[6]d + local shellCompDirectiveFilterDirs=%[7]d + local shellCompDirectiveKeepOrder=%[8]d + + local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder + local -a completions + + __%[1]s_debug "\n========= starting completion logic ==========" + __%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $CURRENT location, so we need + # to truncate the command-line ($words) up to the $CURRENT location. + # (We cannot use $CURSOR as its value does not work when a command is an alias.) + words=("${=words[1,CURRENT]}") + __%[1]s_debug "Truncated words[*]: ${words[*]}," + + lastParam=${words[-1]} + lastChar=${lastParam[-1]} + __%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}" + + # For zsh, when completing a flag with an = (e.g., %[1]s -n=) + # completions must be prefixed with the flag + setopt local_options BASH_REMATCH + if [[ "${lastParam}" =~ '-.*=' ]]; then + # We are dealing with a flag with an = + flagPrefix="-P ${BASH_REMATCH}" + fi + + # Prepare the command to obtain completions + requestComp="${words[1]} %[2]s ${words[2,-1]}" + if [ "${lastChar}" = "" ]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go completion code. + __%[1]s_debug "Adding extra empty parameter" + requestComp="${requestComp} \"\"" + fi + + __%[1]s_debug "About to call: eval ${requestComp}" + + # Use eval to handle any environment variables and such + out=$(eval ${requestComp} 2>/dev/null) + __%[1]s_debug "completion output: ${out}" + + # Extract the directive integer following a : from the last line + local lastLine + while IFS='\n' read -r line; do + lastLine=${line} + done < <(printf "%%s\n" "${out[@]}") + __%[1]s_debug "last line: ${lastLine}" + + if [ "${lastLine[1]}" = : ]; then + directive=${lastLine[2,-1]} + # Remove the directive including the : and the newline + local suffix + (( suffix=${#lastLine}+2)) + out=${out[1,-$suffix]} + else + # There is no directive specified. Leave $out as is. + __%[1]s_debug "No directive found. 
Setting do default" + directive=0 + fi + + __%[1]s_debug "directive: ${directive}" + __%[1]s_debug "completions: ${out}" + __%[1]s_debug "flagPrefix: ${flagPrefix}" + + if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + __%[1]s_debug "Completion received error. Ignoring completions." + return + fi + + local activeHelpMarker="%[9]s" + local endIndex=${#activeHelpMarker} + local startIndex=$((${#activeHelpMarker}+1)) + local hasActiveHelp=0 + while IFS='\n' read -r comp; do + # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker) + if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then + __%[1]s_debug "ActiveHelp found: $comp" + comp="${comp[$startIndex,-1]}" + if [ -n "$comp" ]; then + compadd -x "${comp}" + __%[1]s_debug "ActiveHelp will need delimiter" + hasActiveHelp=1 + fi + + continue + fi + + if [ -n "$comp" ]; then + # If requested, completions are returned with a description. + # The description is preceded by a TAB character. + # For zsh's _describe, we need to use a : instead of a TAB. + # We first need to escape any : as part of the completion itself. + comp=${comp//:/\\:} + + local tab="$(printf '\t')" + comp=${comp//$tab/:} + + __%[1]s_debug "Adding completion: ${comp}" + completions+=${comp} + lastComp=$comp + fi + done < <(printf "%%s\n" "${out[@]}") + + # Add a delimiter after the activeHelp statements, but only if: + # - there are completions following the activeHelp statements, or + # - file completion will be performed (so there will be choices after the activeHelp) + if [ $hasActiveHelp -eq 1 ]; then + if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then + __%[1]s_debug "Adding activeHelp delimiter" + compadd -x "--" + hasActiveHelp=0 + fi + fi + + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + __%[1]s_debug "Activating nospace." + noSpace="-S ''" + fi + + if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then + __%[1]s_debug "Activating keep order." + keepOrder="-V" + fi + + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + # File extension filtering + local filteringCmd + filteringCmd='_files' + for filter in ${completions[@]}; do + if [ ${filter[1]} != '*' ]; then + # zsh requires a glob pattern to do file filtering + filter="\*.$filter" + fi + filteringCmd+=" -g $filter" + done + filteringCmd+=" ${flagPrefix}" + + __%[1]s_debug "File filtering command: $filteringCmd" + _arguments '*:filename:'"$filteringCmd" + elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + # File completion for directories only + local subdir + subdir="${completions[1]}" + if [ -n "$subdir" ]; then + __%[1]s_debug "Listing directories in $subdir" + pushd "${subdir}" >/dev/null 2>&1 + else + __%[1]s_debug "Listing directories in ." + fi + + local result + _arguments '*:dirname:_files -/'" ${flagPrefix}" + result=$? + if [ -n "$subdir" ]; then + popd >/dev/null 2>&1 + fi + return $result + else + __%[1]s_debug "Calling _describe" + if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then + __%[1]s_debug "_describe found some completions" + + # Return the success of having called _describe + return 0 + else + __%[1]s_debug "_describe did not find completions." + __%[1]s_debug "Checking if we should do file completion." 
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + __%[1]s_debug "deactivating file completion" + + # We must return an error code here to let zsh know that there were no + # completions found by _describe; this is what will trigger other + # matching algorithms to attempt to find completions. + # For example zsh can match letters in the middle of words. + return 1 + else + # Perform file completion + __%[1]s_debug "Activating file completion" + + # We must return the result of this command, so it must be the + # last command, or else we must store its result to return it. + _arguments '*:filename:_files'" ${flagPrefix}" + fi + fi + fi +} + +# don't run the completion function when being source-ed or eval-ed +if [ "$funcstack[1]" = "_%[1]s" ]; then + _%[1]s +fi +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, + activeHelpMarker)) +} diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md new file mode 100644 index 000000000..7cff61787 --- /dev/null +++ b/vendor/github.com/spf13/cobra/zsh_completions.md @@ -0,0 +1,48 @@ +## Generating Zsh Completion For Your cobra.Command + +Please refer to [Shell Completions](shell_completions.md) for details. + +## Zsh completions standardization + +Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced. + +### Deprecation summary + +See further below for more details on these deprecations. + +* `cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` is no longer needed. It is therefore **deprecated** and silently ignored. +* `cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` is **deprecated** and silently ignored. + * Instead use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt`. +* `cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored. + * Instead use `ValidArgsFunction`. 
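+
+As an illustrative sketch only (the `status` command and its word list are invented here), the portable replacement for both deprecated calls looks like this:
+
+```go
+package cmd
+
+import "github.com/spf13/cobra"
+
+// newStatusCmd returns a command whose first positional argument is
+// completed from a fixed word list, portably across shells.
+func newStatusCmd() *cobra.Command {
+	return &cobra.Command{
+		Use: "status [release]",
+		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+			if len(args) != 0 {
+				// Only the first argument gets suggestions.
+				return nil, cobra.ShellCompDirectiveNoFileComp
+			}
+			return []string{"alpha", "beta", "stable"}, cobra.ShellCompDirectiveNoFileComp
+		},
+	}
+}
+```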
+ +### Behavioral changes + +**Noun completion** +|Old behavior|New behavior| +|---|---| +|No file completion by default (opposite of bash)|File completion by default; use `ValidArgsFunction` with `ShellCompDirectiveNoFileComp` to turn off file completion on a per-argument basis| +|Completion of flag names without the `-` prefix having been typed|Flag names are only completed if the user has typed the first `-`| +`cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` used to turn on file completion on a per-argument position basis|File completion for all arguments by default; `cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored| +|`cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` used to turn on file completion **with glob filtering** on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored; use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` for file **extension** filtering (not full glob filtering)| +|`cmd.MarkZshCompPositionalArgumentWords(pos, words[])` used to provide completion choices on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored; use `ValidArgsFunction` to achieve the same behavior| + +**Flag-value completion** + +|Old behavior|New behavior| +|---|---| +|No file completion by default (opposite of bash)|File completion by default; use `RegisterFlagCompletionFunc()` with `ShellCompDirectiveNoFileComp` to turn off file completion| +|`cmd.MarkFlagFilename(flag, []string{})` and similar used to turn on file completion|File completion by default; `cmd.MarkFlagFilename(flag, []string{})` no longer needed in this context and silently ignored| +|`cmd.MarkFlagFilename(flag, glob[])` used to turn on file completion **with glob filtering** (syntax of `[]string{"*.yaml", "*.yml"}` incompatible with bash)|Will continue to work, however, support for bash syntax is added and should be used instead so as to work for all shells (`[]string{"yaml", "yml"}`)| +|`cmd.MarkFlagDirname(flag)` only completes directories (zsh-specific)|Has been added for all shells| +|Completion of a flag name does not repeat, unless flag is of type `*Array` or `*Slice` (not supported by bash)|Retained for `zsh` and added to `fish`| +|Completion of a flag name does not provide the `=` form (unlike bash)|Retained for `zsh` and added to `fish`| + +**Improvements** + +* Custom completion support (`ValidArgsFunction` and `RegisterFlagCompletionFunc()`) +* File completion by default if no other completions found +* Handling of required flags +* File extension filtering no longer mutually exclusive with bash usage +* Completion of directory names *within* another directory +* Support for `=` form of flags diff --git a/vendor/modules.txt b/vendor/modules.txt index 6c3d7cd4e..35363eb76 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,3 +1,7 @@ +# github.com/BurntSushi/toml v1.0.0 +## explicit; go 1.16 +github.com/BurntSushi/toml +github.com/BurntSushi/toml/internal # github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 ## explicit; go 1.13 github.com/ProtonMail/go-crypto/bitcurves @@ -18,6 +22,9 @@ github.com/ProtonMail/go-crypto/openpgp/internal/ecc github.com/ProtonMail/go-crypto/openpgp/internal/encoding github.com/ProtonMail/go-crypto/openpgp/packet github.com/ProtonMail/go-crypto/openpgp/s2k +# github.com/alessio/shellescape v1.4.1 +## explicit; go 1.14 +github.com/alessio/shellescape # 
github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile @@ -127,6 +134,10 @@ github.com/google/gofuzz/bytesource # github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 ## explicit; go 1.14 github.com/google/pprof/profile +# github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 +## explicit; go 1.19 +github.com/google/safetext/common +github.com/google/safetext/yamltemplate # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 ## explicit; go 1.13 github.com/google/shlex @@ -136,6 +147,9 @@ github.com/google/uuid # github.com/imdario/mergo v0.3.13 ## explicit; go 1.13 github.com/imdario/mergo +# github.com/inconshreveable/mousetrap v1.1.0 +## explicit; go 1.18 +github.com/inconshreveable/mousetrap # github.com/josharian/intern v1.0.0 ## explicit; go 1.5 github.com/josharian/intern @@ -147,6 +161,9 @@ github.com/json-iterator/go github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter +# github.com/mattn/go-isatty v0.0.17 +## explicit; go 1.15 +github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil @@ -196,6 +213,9 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types +# github.com/pelletier/go-toml v1.9.5 +## explicit; go 1.12 +github.com/pelletier/go-toml # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -223,6 +243,9 @@ github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util # github.com/rogpeppe/go-internal v1.11.0 ## explicit; go 1.19 +# github.com/spf13/cobra v1.7.0 +## explicit; go 1.15 +github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag @@ -770,6 +793,46 @@ sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics ## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json +# sigs.k8s.io/kind v0.20.0 +## explicit; go 1.16 +sigs.k8s.io/kind/pkg/apis/config/defaults +sigs.k8s.io/kind/pkg/apis/config/v1alpha4 +sigs.k8s.io/kind/pkg/cluster +sigs.k8s.io/kind/pkg/cluster/constants +sigs.k8s.io/kind/pkg/cluster/internal/create +sigs.k8s.io/kind/pkg/cluster/internal/create/actions +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready +sigs.k8s.io/kind/pkg/cluster/internal/delete +sigs.k8s.io/kind/pkg/cluster/internal/kubeadm +sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig +sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig +sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer +sigs.k8s.io/kind/pkg/cluster/internal/logs +sigs.k8s.io/kind/pkg/cluster/internal/providers +sigs.k8s.io/kind/pkg/cluster/internal/providers/common +sigs.k8s.io/kind/pkg/cluster/internal/providers/docker +sigs.k8s.io/kind/pkg/cluster/internal/providers/podman +sigs.k8s.io/kind/pkg/cluster/nodes +sigs.k8s.io/kind/pkg/cluster/nodeutils +sigs.k8s.io/kind/pkg/cmd +sigs.k8s.io/kind/pkg/cmd/kind/version +sigs.k8s.io/kind/pkg/errors +sigs.k8s.io/kind/pkg/exec 
+sigs.k8s.io/kind/pkg/fs +sigs.k8s.io/kind/pkg/internal/apis/config +sigs.k8s.io/kind/pkg/internal/apis/config/encoding +sigs.k8s.io/kind/pkg/internal/cli +sigs.k8s.io/kind/pkg/internal/env +sigs.k8s.io/kind/pkg/internal/patch +sigs.k8s.io/kind/pkg/internal/sets +sigs.k8s.io/kind/pkg/internal/version +sigs.k8s.io/kind/pkg/log # sigs.k8s.io/kustomize/api v0.13.2 ## explicit; go 1.19 sigs.k8s.io/kustomize/api/filters/annotations diff --git a/vendor/sigs.k8s.io/kind/LICENSE b/vendor/sigs.k8s.io/kind/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/sigs.k8s.io/kind/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go new file mode 100644 index 000000000..d546929d5 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package defaults contains cross-api-version configuration defaults +package defaults + +// Image is the default for the Config.Image field, aka the default node image. +const Image = "kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72" diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/default.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/default.go new file mode 100644 index 000000000..4626fdd41 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/default.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha4 + +import ( + "sigs.k8s.io/kind/pkg/apis/config/defaults" +) + +// SetDefaultsCluster sets uninitialized fields to their default value. 
+func SetDefaultsCluster(obj *Cluster) { + // default to a one node cluster + if len(obj.Nodes) == 0 { + obj.Nodes = []Node{ + { + Image: defaults.Image, + Role: ControlPlaneRole, + }, + } + } + // default the nodes + for i := range obj.Nodes { + a := &obj.Nodes[i] + SetDefaultsNode(a) + } + if obj.Networking.IPFamily == "" { + obj.Networking.IPFamily = IPv4Family + } + // default to listening on 127.0.0.1:randomPort on ipv4 + // and [::1]:randomPort on ipv6 + if obj.Networking.APIServerAddress == "" { + obj.Networking.APIServerAddress = "127.0.0.1" + if obj.Networking.IPFamily == IPv6Family { + obj.Networking.APIServerAddress = "::1" + } + } + // default the pod CIDR + if obj.Networking.PodSubnet == "" { + obj.Networking.PodSubnet = "10.244.0.0/16" + if obj.Networking.IPFamily == IPv6Family { + // node-mask cidr default is /64 so we need a larger subnet, we use /56 following best practices + // xref: https://www.ripe.net/publications/docs/ripe-690#4--size-of-end-user-prefix-assignment---48---56-or-something-else- + obj.Networking.PodSubnet = "fd00:10:244::/56" + } + if obj.Networking.IPFamily == DualStackFamily { + obj.Networking.PodSubnet = "10.244.0.0/16,fd00:10:244::/56" + } + } + // default the service CIDR using a different subnet than kubeadm default + // https://github.com/kubernetes/kubernetes/blob/746404f82a28e55e0b76ffa7e40306fb88eb3317/cmd/kubeadm/app/apis/kubeadm/v1beta2/defaults.go#L32 + // Note: kubeadm is using a /12 subnet, that may allocate a 2^20 bitmap in etcd + // we allocate a /16 subnet that allows 65535 services (current Kubernetes tested limit is O(10k) services) + if obj.Networking.ServiceSubnet == "" { + obj.Networking.ServiceSubnet = "10.96.0.0/16" + if obj.Networking.IPFamily == IPv6Family { + obj.Networking.ServiceSubnet = "fd00:10:96::/112" + } + if obj.Networking.IPFamily == DualStackFamily { + obj.Networking.ServiceSubnet = "10.96.0.0/16,fd00:10:96::/112" + } + } + // default the KubeProxyMode using iptables as it's already the default + if obj.Networking.KubeProxyMode == "" { + obj.Networking.KubeProxyMode = IPTablesProxyMode + } +} + +// SetDefaultsNode sets uninitialized fields to their default value. +func SetDefaultsNode(obj *Node) { + if obj.Image == "" { + obj.Image = defaults.Image + } + + if obj.Role == "" { + obj.Role = ControlPlaneRole + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/doc.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/doc.go new file mode 100644 index 000000000..68c743e78 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
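To see the defaulting above in action: a zero-value Cluster comes back with a single control-plane node running the default image, IPv4 networking, the 10.244.0.0/16 pod subnet, the 10.96.0.0/16 service subnet, and iptables kube-proxy mode. A minimal sketch, not part of the vendored patch; it assumes only the vendored import path:

package main

import (
    "fmt"

    "sigs.k8s.io/kind/pkg/apis/config/v1alpha4"
)

func main() {
    var cfg v1alpha4.Cluster
    v1alpha4.SetDefaultsCluster(&cfg)

    // one control-plane node running defaults.Image
    fmt.Println(cfg.Nodes[0].Role, cfg.Nodes[0].Image)
    // ipv4 10.244.0.0/16 10.96.0.0/16 iptables
    fmt.Println(cfg.Networking.IPFamily, cfg.Networking.PodSubnet,
        cfg.Networking.ServiceSubnet, cfg.Networking.KubeProxyMode)
}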
+*/ + +// Package v1alpha4 implements the v1alpha4 apiVersion of kind's cluster +// configuration +// +// +k8s:deepcopy-gen=package +package v1alpha4 diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go new file mode 100644 index 000000000..308a6853b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go @@ -0,0 +1,317 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha4 + +// Cluster contains kind cluster configuration +type Cluster struct { + TypeMeta `yaml:",inline" json:",inline"` + + // The cluster name. + // Optional, this will be overridden by --name / KIND_CLUSTER_NAME + Name string `yaml:"name,omitempty" json:"name,omitempty"` + + // Nodes contains the list of nodes defined in the `kind` Cluster + // If unset this will default to a single control-plane node + // Note that if more than one control plane is specified, an external + // control plane load balancer will be provisioned implicitly + Nodes []Node `yaml:"nodes,omitempty" json:"nodes,omitempty"` + + /* Advanced fields */ + + // Networking contains cluster wide network settings + Networking Networking `yaml:"networking,omitempty" json:"networking,omitempty"` + + // FeatureGates contains a map of Kubernetes feature gates to whether they + // are enabled. The feature gates specified here are passed to all Kubernetes components as flags or in config. + // + // https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/ + FeatureGates map[string]bool `yaml:"featureGates,omitempty" json:"featureGates,omitempty"` + + // RuntimeConfig Keys and values are translated into --runtime-config values for kube-apiserver, separated by commas. + // + // Use this to enable alpha APIs. + RuntimeConfig map[string]string `yaml:"runtimeConfig,omitempty" json:"runtimeConfig,omitempty"` + + // KubeadmConfigPatches are applied to the generated kubeadm config as + // merge patches. The `kind` field must match the target object, and + // if `apiVersion` is specified it will only be applied to matching objects. + // + // This should be an inline yaml blob-string + // + // https://tools.ietf.org/html/rfc7386 + // + // The cluster-level patches are applied before the node-level patches. + KubeadmConfigPatches []string `yaml:"kubeadmConfigPatches,omitempty" json:"kubeadmConfigPatches,omitempty"` + + // KubeadmConfigPatchesJSON6902 are applied to the generated kubeadm config + // as JSON 6902 patches. The `kind` field must match the target object, and + // if group or version are specified it will only be objects matching the + // apiVersion: group+"/"+version + // + // Name and Namespace are now ignored, but the fields continue to exist for + // backwards compatibility of parsing the config. The name of the generated + // config was/is always fixed as is the namespace so these fields have + // always been a no-op. 
+	//
+	// https://tools.ietf.org/html/rfc6902
+	//
+	// The cluster-level patches are applied before the node-level patches.
+	KubeadmConfigPatchesJSON6902 []PatchJSON6902 `yaml:"kubeadmConfigPatchesJSON6902,omitempty" json:"kubeadmConfigPatchesJSON6902,omitempty"`
+
+	// ContainerdConfigPatches are applied to every node's containerd config
+	// in the order listed.
+	// These should be TOML strings to be applied as merge patches
+	ContainerdConfigPatches []string `yaml:"containerdConfigPatches,omitempty" json:"containerdConfigPatches,omitempty"`
+
+	// ContainerdConfigPatchesJSON6902 are applied to every node's containerd config
+	// in the order listed.
+	// These should be YAML- or JSON-formatted RFC 6902 JSON patches
+	ContainerdConfigPatchesJSON6902 []string `yaml:"containerdConfigPatchesJSON6902,omitempty" json:"containerdConfigPatchesJSON6902,omitempty"`
+}
+
+// TypeMeta partially copies apimachinery/pkg/apis/meta/v1.TypeMeta
+// No need for a direct dependency; the fields are stable.
+type TypeMeta struct {
+	Kind       string `yaml:"kind,omitempty" json:"kind,omitempty"`
+	APIVersion string `yaml:"apiVersion,omitempty" json:"apiVersion,omitempty"`
+}
+
+// Node contains settings for a node in the `kind` Cluster.
+// A node in the kind config represents a container that will be provisioned
+// with all the components required for the assigned role in the Kubernetes cluster
+type Node struct {
+	// Role defines the role of the node in the Kubernetes cluster
+	// created by kind
+	//
+	// Defaults to "control-plane"
+	Role NodeRole `yaml:"role,omitempty" json:"role,omitempty"`
+
+	// Image is the node image to use when creating this node
+	// If unset a default image will be used, see defaults.Image
+	Image string `yaml:"image,omitempty" json:"image,omitempty"`
+
+	// Labels are the labels with which the respective node will be labeled
+	Labels map[string]string `yaml:"labels,omitempty" json:"labels,omitempty"`
+
+	/* Advanced fields */
+
+	// TODO: cri-like types should be inline instead
+	// ExtraMounts describes additional mount points for the node container
+	// These may be used to bind a hostPath
+	ExtraMounts []Mount `yaml:"extraMounts,omitempty" json:"extraMounts,omitempty"`
+
+	// ExtraPortMappings describes additional port mappings for the node container
+	// bound to a host port
+	ExtraPortMappings []PortMapping `yaml:"extraPortMappings,omitempty" json:"extraPortMappings,omitempty"`
+
+	// KubeadmConfigPatches are applied to the generated kubeadm config as
+	// merge patches. The `kind` field must match the target object, and
+	// if `apiVersion` is specified it will only be applied to matching objects.
+	//
+	// This should be an inline yaml blob-string
+	//
+	// https://tools.ietf.org/html/rfc7386
+	//
+	// The node-level patches will be applied after the cluster-level patches
+	// have been applied. (See Cluster.KubeadmConfigPatches)
+	KubeadmConfigPatches []string `yaml:"kubeadmConfigPatches,omitempty" json:"kubeadmConfigPatches,omitempty"`
+
+	// KubeadmConfigPatchesJSON6902 are applied to the generated kubeadm config
+	// as JSON 6902 patches. The `kind` field must match the target object, and
+	// if group or version are specified it will only be objects matching the
+	// apiVersion: group+"/"+version
+	//
+	// Name and Namespace are now ignored, but the fields continue to exist for
+	// backwards compatibility of parsing the config. The name of the generated
+	// config was/is always fixed as is the namespace so these fields have
+	// always been a no-op.
+	//
+	// https://tools.ietf.org/html/rfc6902
+	//
+	// The node-level patches will be applied after the cluster-level patches
+	// have been applied. (See Cluster.KubeadmConfigPatchesJSON6902)
+	KubeadmConfigPatchesJSON6902 []PatchJSON6902 `yaml:"kubeadmConfigPatchesJSON6902,omitempty" json:"kubeadmConfigPatchesJSON6902,omitempty"`
+}
+
+// NodeRole defines possible roles for nodes in a Kubernetes cluster managed by `kind`
+type NodeRole string
+
+const (
+	// ControlPlaneRole identifies a node that hosts a Kubernetes control-plane.
+	// NOTE: in single node clusters, control-plane nodes also act as worker
+	// nodes, in which case the taint will be removed. See:
+	// https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#control-plane-node-isolation
+	ControlPlaneRole NodeRole = "control-plane"
+	// WorkerRole identifies a node that hosts a Kubernetes worker
+	WorkerRole NodeRole = "worker"
+)
+
+// Networking contains cluster-wide network settings
+type Networking struct {
+	// IPFamily is the network cluster model, currently it can be ipv4, ipv6, or dual
+	IPFamily ClusterIPFamily `yaml:"ipFamily,omitempty" json:"ipFamily,omitempty"`
+	// APIServerPort is the listen port on the host for the Kubernetes API Server
+	// Defaults to a random port on the host obtained by kind
+	//
+	// NOTE: if you set the special value of `-1` then the node backend
+	// (docker, podman...) will be left to pick the port instead.
+	// This is potentially useful for remote hosts, BUT it means when the container
+	// is restarted it will be randomized. Leave this unset to allow kind to pick it.
+	APIServerPort int32 `yaml:"apiServerPort,omitempty" json:"apiServerPort,omitempty"`
+	// APIServerAddress is the listen address on the host for the Kubernetes
+	// API Server. This should be an IP address.
+	//
+	// Defaults to 127.0.0.1
+	APIServerAddress string `yaml:"apiServerAddress,omitempty" json:"apiServerAddress,omitempty"`
+	// PodSubnet is the CIDR used for pod IPs
+	// kind will select a default if unspecified
+	PodSubnet string `yaml:"podSubnet,omitempty" json:"podSubnet,omitempty"`
+	// ServiceSubnet is the CIDR used for service VIPs
+	// kind will select a default if unspecified
+	ServiceSubnet string `yaml:"serviceSubnet,omitempty" json:"serviceSubnet,omitempty"`
+	// If DisableDefaultCNI is true, kind will not install the default CNI setup.
+	// Instead the user should install their own CNI after creating the cluster.
+	DisableDefaultCNI bool `yaml:"disableDefaultCNI,omitempty" json:"disableDefaultCNI,omitempty"`
+	// KubeProxyMode defines if kube-proxy should operate in iptables or ipvs mode
+	// Defaults to 'iptables' mode
+	KubeProxyMode ProxyMode `yaml:"kubeProxyMode,omitempty" json:"kubeProxyMode,omitempty"`
+	// DNSSearch defines the DNS search domain to use for nodes. If not set, this will be inherited from the host.
+ DNSSearch *[]string `yaml:"dnsSearch,omitempty" json:"dnsSearch,omitempty"` +} + +// ClusterIPFamily defines cluster network IP family +type ClusterIPFamily string + +const ( + // IPv4Family sets ClusterIPFamily to ipv4 + IPv4Family ClusterIPFamily = "ipv4" + // IPv6Family sets ClusterIPFamily to ipv6 + IPv6Family ClusterIPFamily = "ipv6" + // DualStackFamily sets ClusterIPFamily to dual + DualStackFamily ClusterIPFamily = "dual" +) + +// ProxyMode defines a proxy mode for kube-proxy +type ProxyMode string + +const ( + // IPTablesProxyMode sets ProxyMode to iptables + IPTablesProxyMode ProxyMode = "iptables" + // IPVSProxyMode sets ProxyMode to ipvs + IPVSProxyMode ProxyMode = "ipvs" +) + +// PatchJSON6902 represents an inline kustomize json 6902 patch +// https://tools.ietf.org/html/rfc6902 +type PatchJSON6902 struct { + // these fields specify the patch target resource + Group string `yaml:"group" json:"group"` + Version string `yaml:"version" json:"version"` + Kind string `yaml:"kind" json:"kind"` + // Patch should contain the contents of the json patch as a string + Patch string `yaml:"patch" json:"patch"` +} + +/* +These types are from +https://github.com/kubernetes/kubernetes/blob/063e7ff358fdc8b0916e6f39beedc0d025734cb1/pkg/kubelet/apis/cri/runtime/v1alpha2/api.pb.go#L183 +*/ + +// Mount specifies a host volume to mount into a container. +// This is a close copy of the upstream cri Mount type +// see: k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2 +// It additionally serializes the "propagation" field with the string enum +// names on disk as opposed to the int32 values, and the serialized field names +// have been made closer to core/v1 VolumeMount field names +// In yaml this looks like: +// +// containerPath: /foo +// hostPath: /bar +// readOnly: true +// selinuxRelabel: false +// propagation: None +// +// Propagation may be one of: None, HostToContainer, Bidirectional +type Mount struct { + // Path of the mount within the container. + ContainerPath string `yaml:"containerPath,omitempty" json:"containerPath,omitempty"` + // Path of the mount on the host. If the hostPath doesn't exist, then runtimes + // should report error. If the hostpath is a symbolic link, runtimes should + // follow the symlink and mount the real destination to container. + HostPath string `yaml:"hostPath,omitempty" json:"hostPath,omitempty"` + // If set, the mount is read-only. + Readonly bool `yaml:"readOnly,omitempty" json:"readOnly,omitempty"` + // If set, the mount needs SELinux relabeling. + SelinuxRelabel bool `yaml:"selinuxRelabel,omitempty" json:"selinuxRelabel,omitempty"` + // Requested propagation mode. + Propagation MountPropagation `yaml:"propagation,omitempty" json:"propagation,omitempty"` +} + +// PortMapping specifies a host port mapped into a container port. +// In yaml this looks like: +// +// containerPort: 80 +// hostPort: 8000 +// listenAddress: 127.0.0.1 +// protocol: TCP +type PortMapping struct { + // Port within the container. + ContainerPort int32 `yaml:"containerPort,omitempty" json:"containerPort,omitempty"` + // Port on the host. + // + // If unset, a random port will be selected. + // + // NOTE: if you set the special value of `-1` then the node backend + // (docker, podman...) will be left to pick the port instead. + // This is potentially useful for remote hosts, BUT it means when the container + // is restarted it will be randomized. Leave this unset to allow kind to pick it. 
+	HostPort int32 `yaml:"hostPort,omitempty" json:"hostPort,omitempty"`
+	// TODO: add protocol (tcp/udp) and port-ranges
+	ListenAddress string `yaml:"listenAddress,omitempty" json:"listenAddress,omitempty"`
+	// Protocol (TCP/UDP/SCTP)
+	Protocol PortMappingProtocol `yaml:"protocol,omitempty" json:"protocol,omitempty"`
+}
+
+// MountPropagation represents an "enum" for mount propagation options,
+// see also Mount.
+type MountPropagation string
+
+const (
+	// MountPropagationNone specifies no mount propagation
+	// ("private" in Linux terminology).
+	MountPropagationNone MountPropagation = "None"
+	// MountPropagationHostToContainer specifies that mounts get propagated
+	// from the host to the container ("rslave" in Linux).
+	MountPropagationHostToContainer MountPropagation = "HostToContainer"
+	// MountPropagationBidirectional specifies that mounts get propagated from
+	// the host to the container and from the container to the host
+	// ("rshared" in Linux).
+	MountPropagationBidirectional MountPropagation = "Bidirectional"
+)
+
+// PortMappingProtocol represents an "enum" for port mapping protocol options,
+// see also PortMapping.
+type PortMappingProtocol string
+
+const (
+	// PortMappingProtocolTCP specifies TCP protocol
+	PortMappingProtocolTCP PortMappingProtocol = "TCP"
+	// PortMappingProtocolUDP specifies UDP protocol
+	PortMappingProtocolUDP PortMappingProtocol = "UDP"
+	// PortMappingProtocolSCTP specifies SCTP protocol
+	PortMappingProtocolSCTP PortMappingProtocol = "SCTP"
+)
diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/yaml.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/yaml.go
new file mode 100644
index 000000000..d34d4c8cc
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/yaml.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package v1alpha4 + +import ( + "strings" + + "sigs.k8s.io/kind/pkg/errors" +) + +/* +Custom YAML (de)serialization for these types +*/ + +// UnmarshalYAML implements custom decoding YAML +// https://godoc.org/gopkg.in/yaml.v3 +func (m *Mount) UnmarshalYAML(unmarshal func(interface{}) error) error { + // first unmarshal in the alias type (to avoid a recursion loop on unmarshal) + type MountAlias Mount + var a MountAlias + if err := unmarshal(&a); err != nil { + return err + } + // now handle propagation + switch a.Propagation { + case "": // unset, will be defaulted + case MountPropagationNone: + case MountPropagationHostToContainer: + case MountPropagationBidirectional: + default: + return errors.Errorf("Unknown MountPropagation: %q", a.Propagation) + } + // and copy over the fields + *m = Mount(a) + return nil +} + +// UnmarshalYAML implements custom decoding YAML +// https://godoc.org/gopkg.in/yaml.v3 +func (p *PortMapping) UnmarshalYAML(unmarshal func(interface{}) error) error { + // first unmarshal in the alias type (to avoid a recursion loop on unmarshal) + type PortMappingAlias PortMapping + var a PortMappingAlias + if err := unmarshal(&a); err != nil { + return err + } + // now handle the protocol field + a.Protocol = PortMappingProtocol(strings.ToUpper(string(a.Protocol))) + switch a.Protocol { + case "": // unset, will be defaulted + case PortMappingProtocolTCP: + case PortMappingProtocolUDP: + case PortMappingProtocolSCTP: + default: + return errors.Errorf("Unknown PortMappingProtocol: %q", a.Protocol) + } + // and copy over the fields + *p = PortMapping(a) + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/zz_generated.deepcopy.go new file mode 100644 index 000000000..b210133da --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/zz_generated.deepcopy.go @@ -0,0 +1,213 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha4 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
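The two unmarshalers above use the legacy `UnmarshalYAML(unmarshal func(interface{}) error)` hook, which gopkg.in/yaml.v3 still honors for backwards compatibility; protocol values are upper-cased and invalid propagation/protocol values are rejected at decode time rather than surfacing later. An illustrative sketch, not part of the patch, assuming gopkg.in/yaml.v3 and the vendored import path:

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"

    "sigs.k8s.io/kind/pkg/apis/config/v1alpha4"
)

const raw = `
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  extraPortMappings:
  - containerPort: 80
    hostPort: 8080
    protocol: tcp # normalized to "TCP" by PortMapping.UnmarshalYAML
`

func main() {
    var cfg v1alpha4.Cluster
    if err := yaml.Unmarshal([]byte(raw), &cfg); err != nil {
        panic(err) // an unknown protocol or propagation value would land here
    }
    fmt.Println(cfg.Nodes[0].ExtraPortMappings[0].Protocol) // TCP
}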
+func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Networking.DeepCopyInto(&out.Networking) + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RuntimeConfig != nil { + in, out := &in.RuntimeConfig, &out.RuntimeConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.KubeadmConfigPatches != nil { + in, out := &in.KubeadmConfigPatches, &out.KubeadmConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatchesJSON6902 != nil { + in, out := &in.KubeadmConfigPatchesJSON6902, &out.KubeadmConfigPatchesJSON6902 + *out = make([]PatchJSON6902, len(*in)) + copy(*out, *in) + } + if in.ContainerdConfigPatches != nil { + in, out := &in.ContainerdConfigPatches, &out.ContainerdConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ContainerdConfigPatchesJSON6902 != nil { + in, out := &in.ContainerdConfigPatchesJSON6902, &out.ContainerdConfigPatchesJSON6902 + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mount) DeepCopyInto(out *Mount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount. +func (in *Mount) DeepCopy() *Mount { + if in == nil { + return nil + } + out := new(Mount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in + if in.DNSSearch != nil { + in, out := &in.DNSSearch, &out.DNSSearch + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. +func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Node) DeepCopyInto(out *Node) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = make([]Mount, len(*in)) + copy(*out, *in) + } + if in.ExtraPortMappings != nil { + in, out := &in.ExtraPortMappings, &out.ExtraPortMappings + *out = make([]PortMapping, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatches != nil { + in, out := &in.KubeadmConfigPatches, &out.KubeadmConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatchesJSON6902 != nil { + in, out := &in.KubeadmConfigPatchesJSON6902, &out.KubeadmConfigPatchesJSON6902 + *out = make([]PatchJSON6902, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchJSON6902) DeepCopyInto(out *PatchJSON6902) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchJSON6902. +func (in *PatchJSON6902) DeepCopy() *PatchJSON6902 { + if in == nil { + return nil + } + out := new(PatchJSON6902) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortMapping) DeepCopyInto(out *PortMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMapping. +func (in *PortMapping) DeepCopy() *PortMapping { + if in == nil { + return nil + } + out := new(PortMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeMeta) DeepCopyInto(out *TypeMeta) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeMeta. +func (in *TypeMeta) DeepCopy() *TypeMeta { + if in == nil { + return nil + } + out := new(TypeMeta) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/constants/constants.go b/vendor/sigs.k8s.io/kind/pkg/cluster/constants/constants.go new file mode 100644 index 000000000..fb9c0734c --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/constants/constants.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
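As with all deepcopy-gen output, the point of these methods is aliasing safety: slices and maps are copied, so mutating the copy leaves the source untouched. A quick fragment, not part of the patch, for illustration:

cfg := &v1alpha4.Cluster{Nodes: []v1alpha4.Node{{Role: v1alpha4.WorkerRole}}}
dup := cfg.DeepCopy()
dup.Nodes[0].Role = v1alpha4.ControlPlaneRole // the Nodes slice was copied, not shared
fmt.Println(cfg.Nodes[0].Role)                // still "worker"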
+*/ + +// Package constants contains well known constants for kind clusters +package constants + +// DefaultClusterName is the default cluster Context name +const DefaultClusterName = "kind" + +/* node role value constants */ +const ( + // ControlPlaneNodeRoleValue identifies a node that hosts a Kubernetes + // control-plane. + // + // NOTE: in single node clusters, control-plane nodes act as worker nodes + ControlPlaneNodeRoleValue string = "control-plane" + + // WorkerNodeRoleValue identifies a node that hosts a Kubernetes worker + WorkerNodeRoleValue string = "worker" + + // ExternalLoadBalancerNodeRoleValue identifies a node that hosts an + // external load balancer for the API server in HA configurations. + // + // Please note that `kind` nodes hosting external load balancer are not + // kubernetes nodes + ExternalLoadBalancerNodeRoleValue string = "external-load-balancer" + + // ExternalEtcdNodeRoleValue identifies a node that hosts an external-etcd + // instance. + // + // WARNING: this node type is not yet implemented! + // + // Please note that `kind` nodes hosting external etcd are not + // kubernetes nodes + ExternalEtcdNodeRoleValue string = "external-etcd" +) diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/createoption.go b/vendor/sigs.k8s.io/kind/pkg/cluster/createoption.go new file mode 100644 index 000000000..699bac510 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/createoption.go @@ -0,0 +1,126 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + "time" + + "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + internalcreate "sigs.k8s.io/kind/pkg/cluster/internal/create" + internalencoding "sigs.k8s.io/kind/pkg/internal/apis/config/encoding" +) + +// CreateOption is a Provider.Create option +type CreateOption interface { + apply(*internalcreate.ClusterOptions) error +} + +type createOptionAdapter func(*internalcreate.ClusterOptions) error + +func (c createOptionAdapter) apply(o *internalcreate.ClusterOptions) error { + return c(o) +} + +// CreateWithConfigFile configures the config file path to use +func CreateWithConfigFile(path string) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + var err error + o.Config, err = internalencoding.Load(path) + return err + }) +} + +// CreateWithRawConfig configures the config to use from raw (yaml) bytes +func CreateWithRawConfig(raw []byte) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + var err error + o.Config, err = internalencoding.Parse(raw) + return err + }) +} + +// CreateWithV1Alpha4Config configures the cluster with a v1alpha4 config +func CreateWithV1Alpha4Config(config *v1alpha4.Cluster) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.Config = internalencoding.V1Alpha4ToInternal(config) + return nil + }) +} + +// CreateWithNodeImage overrides the image on all nodes in config +// as an easy way to change the Kubernetes version +func CreateWithNodeImage(nodeImage string) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.NodeImage = nodeImage + return nil + }) +} + +// CreateWithRetain disables deletion of nodes and any other cleanup +// that would normally occur after a failure to create +// This is mainly used for debugging purposes +func CreateWithRetain(retain bool) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.Retain = retain + return nil + }) +} + +// CreateWithWaitForReady configures a maximum wait time for the control plane +// node(s) to be ready. By default no waiting is performed +func CreateWithWaitForReady(waitTime time.Duration) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.WaitForReady = waitTime + return nil + }) +} + +// CreateWithKubeconfigPath sets the explicit --kubeconfig path +func CreateWithKubeconfigPath(explicitPath string) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.KubeconfigPath = explicitPath + return nil + }) +} + +// CreateWithStopBeforeSettingUpKubernetes enables skipping setting up +// kubernetes (kubeadm init etc.) 
after creating node containers
+// This generally shouldn't be used and is only lightly supported, but allows
+// provisioning node containers for experimentation
+func CreateWithStopBeforeSettingUpKubernetes(stopBeforeSettingUpKubernetes bool) CreateOption {
+	return createOptionAdapter(func(o *internalcreate.ClusterOptions) error {
+		o.StopBeforeSettingUpKubernetes = stopBeforeSettingUpKubernetes
+		return nil
+	})
+}
+
+// CreateWithDisplayUsage enables displaying usage if displayUsage is true
+func CreateWithDisplayUsage(displayUsage bool) CreateOption {
+	return createOptionAdapter(func(o *internalcreate.ClusterOptions) error {
+		o.DisplayUsage = displayUsage
+		return nil
+	})
+}
+
+// CreateWithDisplaySalutation enables displaying a salutation at the end of
+// cluster creation if displaySalutation is true
+func CreateWithDisplaySalutation(displaySalutation bool) CreateOption {
+	return createOptionAdapter(func(o *internalcreate.ClusterOptions) error {
+		o.DisplaySalutation = displaySalutation
+		return nil
+	})
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/doc.go
new file mode 100644
index 000000000..af19392fd
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package cluster implements kind kubernetes-in-docker cluster management
+package cluster
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/action.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/action.go
new file mode 100644
index 000000000..61dd53cbf
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/action.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
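Taken together, these functional options are consumed by the package's public Provider API. A hypothetical caller might look like the following sketch; it is not part of the patch, and cluster.NewProvider / Provider.Create are assumed from the rest of the kind package, which this hunk does not show:

package main

import (
    "time"

    "sigs.k8s.io/kind/pkg/apis/config/v1alpha4"
    "sigs.k8s.io/kind/pkg/cluster"
)

func main() {
    provider := cluster.NewProvider() // assumed public API, not shown in this hunk
    if err := provider.Create(
        "demo",
        cluster.CreateWithV1Alpha4Config(&v1alpha4.Cluster{}), // defaulted internally
        cluster.CreateWithWaitForReady(2*time.Minute),         // by default no waiting is performed
        cluster.CreateWithDisplaySalutation(false),
    ); err != nil {
        panic(err)
    }
}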
+*/ + +package actions + +import ( + "sync" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/cluster/internal/providers" +) + +// Action defines a step of bringing up a kind cluster after initial node +// container creation +type Action interface { + Execute(ctx *ActionContext) error +} + +// ActionContext is data supplied to all actions +type ActionContext struct { + Logger log.Logger + Status *cli.Status + Config *config.Cluster + Provider providers.Provider + cache *cachedData +} + +// NewActionContext returns a new ActionContext +func NewActionContext( + logger log.Logger, + status *cli.Status, + provider providers.Provider, + cfg *config.Cluster, +) *ActionContext { + return &ActionContext{ + Logger: logger, + Status: status, + Provider: provider, + Config: cfg, + cache: &cachedData{}, + } +} + +type cachedData struct { + mu sync.RWMutex + nodes []nodes.Node +} + +func (cd *cachedData) getNodes() []nodes.Node { + cd.mu.RLock() + defer cd.mu.RUnlock() + return cd.nodes +} + +func (cd *cachedData) setNodes(n []nodes.Node) { + cd.mu.Lock() + defer cd.mu.Unlock() + cd.nodes = n +} + +// Nodes returns the list of cluster nodes, this is a cached call +func (ac *ActionContext) Nodes() ([]nodes.Node, error) { + cachedNodes := ac.cache.getNodes() + if cachedNodes != nil { + return cachedNodes, nil + } + n, err := ac.Provider.ListNodes(ac.Config.Name) + if err != nil { + return nil, err + } + ac.cache.setNodes(n) + return n, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config/config.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config/config.go new file mode 100644 index 000000000..172619807 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config/config.go @@ -0,0 +1,279 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
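Everything kind does after the node containers exist is expressed as one of these Actions sharing an ActionContext, with the node list fetched once from the provider and cached for subsequent actions. A sketch of the pattern, illustrative only since this is an internal package:

// listNodesAction logs the cluster's node names; a minimal Action.
type listNodesAction struct{}

func (a *listNodesAction) Execute(ctx *actions.ActionContext) error {
    ns, err := ctx.Nodes() // first call queries the provider, later calls hit the cache
    if err != nil {
        return err
    }
    for _, n := range ns {
        ctx.Logger.V(1).Infof("node: %s", n.String())
    }
    return nil
}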
+*/ + +// Package config implements the kubeadm config action +package config + +import ( + "bytes" + "fmt" + "net" + "strings" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" + "sigs.k8s.io/kind/pkg/cluster/internal/kubeadm" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/patch" +) + +// Action implements action for creating the node config files +type Action struct{} + +// NewAction returns a new action for creating the config files +func NewAction() actions.Action { + return &Action{} +} + +// Execute runs the action +func (a *Action) Execute(ctx *actions.ActionContext) error { + ctx.Status.Start("Writing configuration 📜") + defer ctx.Status.End(false) + + providerInfo, err := ctx.Provider.Info() + if err != nil { + return err + } + + allNodes, err := ctx.Nodes() + if err != nil { + return err + } + + controlPlaneEndpoint, err := ctx.Provider.GetAPIServerInternalEndpoint(ctx.Config.Name) + if err != nil { + return err + } + + // create kubeadm init config + fns := []func() error{} + + provider := fmt.Sprintf("%s", ctx.Provider) + configData := kubeadm.ConfigData{ + NodeProvider: provider, + ClusterName: ctx.Config.Name, + ControlPlaneEndpoint: controlPlaneEndpoint, + APIBindPort: common.APIServerInternalPort, + APIServerAddress: ctx.Config.Networking.APIServerAddress, + Token: kubeadm.Token, + PodSubnet: ctx.Config.Networking.PodSubnet, + KubeProxyMode: string(ctx.Config.Networking.KubeProxyMode), + ServiceSubnet: ctx.Config.Networking.ServiceSubnet, + ControlPlane: true, + IPFamily: ctx.Config.Networking.IPFamily, + FeatureGates: ctx.Config.FeatureGates, + RuntimeConfig: ctx.Config.RuntimeConfig, + RootlessProvider: providerInfo.Rootless, + } + + kubeadmConfigPlusPatches := func(node nodes.Node, data kubeadm.ConfigData) func() error { + return func() error { + data.NodeName = node.String() + kubeadmConfig, err := getKubeadmConfig(ctx.Config, data, node, provider) + if err != nil { + // TODO(bentheelder): logging here + return errors.Wrap(err, "failed to generate kubeadm config content") + } + + ctx.Logger.V(2).Infof("Using the following kubeadm config for node %s:\n%s", node.String(), kubeadmConfig) + return writeKubeadmConfig(kubeadmConfig, node) + } + } + + // create the kubeadm join configuration for the kubernetes cluster nodes only + kubeNodes, err := nodeutils.InternalNodes(allNodes) + if err != nil { + return err + } + + for _, node := range kubeNodes { + node := node // capture loop variable + configData := configData // copy config data + fns = append(fns, kubeadmConfigPlusPatches(node, configData)) + } + + // Create the kubeadm config in all nodes concurrently + if err := errors.UntilErrorConcurrent(fns); err != nil { + return err + } + + // if we have containerd config, patch all the nodes concurrently + if len(ctx.Config.ContainerdConfigPatches) > 0 || len(ctx.Config.ContainerdConfigPatchesJSON6902) > 0 { + fns := make([]func() error, len(kubeNodes)) + for i, node := range kubeNodes { + node := node // capture loop variable + fns[i] = func() error { + // read and patch the config + const containerdConfigPath = "/etc/containerd/config.toml" + var buff bytes.Buffer + if err := node.Command("cat", containerdConfigPath).SetStdout(&buff).Run(); err != nil { + return errors.Wrap(err, "failed to read containerd config from 
node") + } + patched, err := patch.TOML(buff.String(), ctx.Config.ContainerdConfigPatches, ctx.Config.ContainerdConfigPatchesJSON6902) + if err != nil { + return errors.Wrap(err, "failed to patch containerd config") + } + if err := nodeutils.WriteFile(node, containerdConfigPath, patched); err != nil { + return errors.Wrap(err, "failed to write patched containerd config") + } + // restart containerd now that we've re-configured it + // skip if containerd is not running + if err := node.Command("bash", "-c", `! pgrep --exact containerd || systemctl restart containerd`).Run(); err != nil { + return errors.Wrap(err, "failed to restart containerd after patching config") + } + return nil + } + } + if err := errors.UntilErrorConcurrent(fns); err != nil { + return err + } + } + + // mark success + ctx.Status.End(true) + return nil +} + +// getKubeadmConfig generates the kubeadm config contents for the cluster +// by running data through the template and applying patches as needed. +func getKubeadmConfig(cfg *config.Cluster, data kubeadm.ConfigData, node nodes.Node, provider string) (path string, err error) { + kubeVersion, err := nodeutils.KubeVersion(node) + if err != nil { + // TODO(bentheelder): logging here + return "", errors.Wrap(err, "failed to get kubernetes version from node") + } + data.KubernetesVersion = kubeVersion + + // TODO: gross hack! + // identify node in config by matching name (since these are named in order) + // we should really just streamline the bootstrap code and maintain + // this mapping ... something for the next major refactor + var configNode *config.Node + namer := common.MakeNodeNamer("") + for i := range cfg.Nodes { + n := &cfg.Nodes[i] + nodeSuffix := namer(string(n.Role)) + if strings.HasSuffix(node.String(), nodeSuffix) { + configNode = n + } + } + if configNode == nil { + return "", errors.Errorf("failed to match node %q to config", node.String()) + } + + // get the node ip address + nodeAddress, nodeAddressIPv6, err := node.IP() + if err != nil { + return "", errors.Wrap(err, "failed to get IP for node") + } + + data.NodeAddress = nodeAddress + // configure the right protocol addresses + if cfg.Networking.IPFamily == config.IPv6Family || cfg.Networking.IPFamily == config.DualStackFamily { + if ip := net.ParseIP(nodeAddressIPv6); ip.To16() == nil { + return "", errors.Errorf("failed to get IPv6 address for node %s; is %s configured to use IPv6 correctly?", node.String(), provider) + } + data.NodeAddress = nodeAddressIPv6 + if cfg.Networking.IPFamily == config.DualStackFamily { + // order matters since the nodeAddress will be used later to configure the apiserver advertise address + // Ref: #2484 + primaryServiceSubnet := strings.Split(cfg.Networking.ServiceSubnet, ",")[0] + ip, _, err := net.ParseCIDR(primaryServiceSubnet) + if err != nil { + return "", fmt.Errorf("failed to parse primary Service Subnet %s (%s): %w", primaryServiceSubnet, cfg.Networking.ServiceSubnet, err) + } + if ip.To4() != nil { + data.NodeAddress = fmt.Sprintf("%s,%s", nodeAddress, nodeAddressIPv6) + } else { + data.NodeAddress = fmt.Sprintf("%s,%s", nodeAddressIPv6, nodeAddress) + } + } + } + + // configure the node labels + if len(configNode.Labels) > 0 { + data.NodeLabels = hashMapLabelsToCommaSeparatedLabels(configNode.Labels) + } + + // set the node role + data.ControlPlane = string(configNode.Role) == constants.ControlPlaneNodeRoleValue + + // generate the config contents + cf, err := kubeadm.Config(data) + if err != nil { + return "", err + } + + clusterPatches, clusterJSONPatches 
:= allPatchesFromConfig(cfg) + // apply cluster-level patches first + patchedConfig, err := patch.KubeYAML(cf, clusterPatches, clusterJSONPatches) + if err != nil { + return "", err + } + + // if needed, apply current node's patches + if len(configNode.KubeadmConfigPatches) > 0 || len(configNode.KubeadmConfigPatchesJSON6902) > 0 { + patchedConfig, err = patch.KubeYAML(patchedConfig, configNode.KubeadmConfigPatches, configNode.KubeadmConfigPatchesJSON6902) + if err != nil { + return "", err + } + } + + // fix all the patches to have name metadata matching the generated config + return removeMetadata(patchedConfig), nil +} + +// trims out the metadata.name we put in the config for kustomize matching, +// kubeadm will complain about this otherwise +func removeMetadata(kustomized string) string { + return strings.Replace( + kustomized, + `metadata: + name: config +`, + "", + -1, + ) +} + +func allPatchesFromConfig(cfg *config.Cluster) (patches []string, jsonPatches []config.PatchJSON6902) { + return cfg.KubeadmConfigPatches, cfg.KubeadmConfigPatchesJSON6902 +} + +// writeKubeadmConfig writes the kubeadm configuration in the specified node +func writeKubeadmConfig(kubeadmConfig string, node nodes.Node) error { + // copy the config to the node + if err := nodeutils.WriteFile(node, "/kind/kubeadm.conf", kubeadmConfig); err != nil { + // TODO(bentheelder): logging here + return errors.Wrap(err, "failed to copy kubeadm config to node") + } + + return nil +} + +// hashMapLabelsToCommaSeparatedLabels converts labels in hashmap form to labels in a comma-separated string form like "key1=value1,key2=value2" +func hashMapLabelsToCommaSeparatedLabels(labels map[string]string) string { + output := "" + for key, value := range labels { + output += fmt.Sprintf("%s=%s,", key, value) + } + return strings.TrimSuffix(output, ",") // remove the last character (comma) in the output string +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni/cni.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni/cni.go new file mode 100644 index 000000000..fc9675173 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni/cni.go @@ -0,0 +1,134 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
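hashMapLabelsToCommaSeparatedLabels produces the comma-separated form kubeadm expects for node labels; note that Go map iteration order is randomized, so the pair order varies between runs. An illustrative fragment, not part of the patch:

labels := map[string]string{"env": "dev", "tier": "backend"}
fmt.Println(hashMapLabelsToCommaSeparatedLabels(labels))
// prints "env=dev,tier=backend" or "tier=backend,env=dev"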
+*/ + +// Package installcni implements the install CNI action +package installcni + +import ( + "bytes" + "strings" + + "github.com/google/safetext/yamltemplate" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/internal/apis/config" + + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/internal/patch" +) + +type action struct{} + +// NewAction returns a new action for installing default CNI +func NewAction() actions.Action { + return &action{} +} + +// Execute runs the action +func (a *action) Execute(ctx *actions.ActionContext) error { + ctx.Status.Start("Installing CNI 🔌") + defer ctx.Status.End(false) + + allNodes, err := ctx.Nodes() + if err != nil { + return err + } + + // get the target node for this task + controlPlanes, err := nodeutils.ControlPlaneNodes(allNodes) + if err != nil { + return err + } + node := controlPlanes[0] // kind expects at least one always + + // read the manifest from the node + var raw bytes.Buffer + if err := node.Command("cat", "/kind/manifests/default-cni.yaml").SetStdout(&raw).Run(); err != nil { + return errors.Wrap(err, "failed to read CNI manifest") + } + manifest := raw.String() + + // TODO: remove this check? + // backwards compatibility for mounting your own manifest file to the default + // location + // NOTE: this is intentionally undocumented, as an internal implementation + // detail. Going forward users should disable the default CNI and install + // their own, or use the default. The internal templating mechanism is + // not intended for external usage and is unstable. + if strings.Contains(manifest, "would you kindly template this file") { + t, err := yamltemplate.New("cni-manifest").Parse(manifest) + if err != nil { + return errors.Wrap(err, "failed to parse CNI manifest template") + } + var out bytes.Buffer + err = t.Execute(&out, &struct { + PodSubnet string + }{ + PodSubnet: ctx.Config.Networking.PodSubnet, + }) + if err != nil { + return errors.Wrap(err, "failed to execute CNI manifest template") + } + manifest = out.String() + } + + // NOTE: this is intentionally undocumented, as an internal implementation + // detail. Going forward users should disable the default CNI and install + // their own, or use the default. The internal templating mechanism is + // not intended for external usage and is unstable. 
+	if strings.Contains(manifest, "would you kindly patch this file") {
+		// Add the control-plane endpoint so kindnet doesn't have to wait for kube-proxy
+		controlPlaneEndpoint, err := ctx.Provider.GetAPIServerInternalEndpoint(ctx.Config.Name)
+		if err != nil {
+			return err
+		}
+
+		patchValue := `
+- op: add
+  path: /spec/template/spec/containers/0/env/-
+  value:
+    name: CONTROL_PLANE_ENDPOINT
+    value: ` + controlPlaneEndpoint
+
+		controlPlanePatch6902 := config.PatchJSON6902{
+			Group:   "apps",
+			Version: "v1",
+			Kind:    "DaemonSet",
+			Patch:   patchValue,
+		}
+
+		patchedConfig, err := patch.KubeYAML(manifest, nil, []config.PatchJSON6902{controlPlanePatch6902})
+		if err != nil {
+			return err
+		}
+		manifest = patchedConfig
+	}
+
+	ctx.Logger.V(5).Infof("Using the following Kindnetd config:\n%s", manifest)
+
+	// install the manifest
+	if err := node.Command(
+		"kubectl", "create", "--kubeconfig=/etc/kubernetes/admin.conf",
+		"-f", "-",
+	).SetStdin(strings.NewReader(manifest)).Run(); err != nil {
+		return errors.Wrap(err, "failed to apply overlay network")
+	}
+
+	// mark success
+	ctx.Status.End(true)
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage/storage.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage/storage.go
new file mode 100644
index 000000000..fa9a095b0
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage/storage.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package installstorage implements an action to install a default
+// storageclass
+package installstorage
+
+import (
+	"bytes"
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/cluster/nodes"
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/log"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
+	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
+)
+
+type action struct{}
+
+// NewAction returns a new action for installing storage
+func NewAction() actions.Action {
+	return &action{}
+}
+
+// Execute runs the action
+func (a *action) Execute(ctx *actions.ActionContext) error {
+	ctx.Status.Start("Installing StorageClass 💾")
+	defer ctx.Status.End(false)
+
+	allNodes, err := ctx.Nodes()
+	if err != nil {
+		return err
+	}
+
+	// get the target node for this task
+	controlPlanes, err := nodeutils.ControlPlaneNodes(allNodes)
+	if err != nil {
+		return err
+	}
+	node := controlPlanes[0] // kind expects at least one always
+
+	// add the default storage class
+	if err := addDefaultStorage(ctx.Logger, node); err != nil {
+		return errors.Wrap(err, "failed to add default storage class")
+	}
+
+	// mark success
+	ctx.Status.End(true)
+	return nil
+}
+
+// legacy default storage class
+// we need this for e2es (StatefulSet)
+// newer kind images ship a storage driver manifest
+const defaultStorageManifest = `# host-path based default storage class
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  namespace: kube-system
+  name: standard
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+provisioner: kubernetes.io/host-path`
+
+func addDefaultStorage(logger log.Logger, controlPlane nodes.Node) error {
+	// start with fallback default, and then try to get the newer kind node
+	// storage manifest if present
+	manifest := defaultStorageManifest
+	var raw bytes.Buffer
+	if err := controlPlane.Command("cat", "/kind/manifests/default-storage.yaml").SetStdout(&raw).Run(); err != nil {
+		logger.Warn("Could not read storage manifest, falling back on old k8s.io/host-path default ...")
+	} else {
+		manifest = raw.String()
+	}
+
+	// apply the manifest
+	in := strings.NewReader(manifest)
+	cmd := controlPlane.Command(
+		"kubectl",
+		"--kubeconfig=/etc/kubernetes/admin.conf", "apply", "-f", "-",
+	)
+	cmd.SetStdin(in)
+	return cmd.Run()
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit/init.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit/init.go
new file mode 100644
index 000000000..cc587940e
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit/init.go
@@ -0,0 +1,148 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +// Package kubeadminit implements the kubeadm init action +package kubeadminit + +import ( + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/version" +) + +// kubeadmInitAction implements action for executing the kubeadm init +// and a set of default post init operations like e.g. install the +// CNI network plugin. +type action struct { + skipKubeProxy bool +} + +// NewAction returns a new action for kubeadm init +func NewAction(cfg *config.Cluster) actions.Action { + return &action{skipKubeProxy: cfg.Networking.KubeProxyMode == config.NoneProxyMode} +} + +// Execute runs the action +func (a *action) Execute(ctx *actions.ActionContext) error { + ctx.Status.Start("Starting control-plane 🕹️") + defer ctx.Status.End(false) + + allNodes, err := ctx.Nodes() + if err != nil { + return err + } + + // get the target node for this task + // TODO: eliminate the concept of bootstrapcontrolplane node entirely + // outside this method + node, err := nodeutils.BootstrapControlPlaneNode(allNodes) + if err != nil { + return err + } + + // skip preflight checks, as these have undesirable side effects + // and don't tell us much. requires kubeadm 1.13+ + skipPhases := "preflight" + if a.skipKubeProxy { + skipPhases += ",addon/kube-proxy" + } + + // run kubeadm + cmd := node.Command( + // init because this is the control plane node + "kubeadm", "init", + "--skip-phases="+skipPhases, + // specify our generated config file + "--config=/kind/kubeadm.conf", + "--skip-token-print", + // increase verbosity for debugging + "--v=6", + ) + lines, err := exec.CombinedOutputLines(cmd) + ctx.Logger.V(3).Info(strings.Join(lines, "\n")) + if err != nil { + return errors.Wrap(err, "failed to init node with kubeadm") + } + + // copy some files to the other control plane nodes + otherControlPlanes, err := nodeutils.SecondaryControlPlaneNodes(allNodes) + if err != nil { + return err + } + for _, otherNode := range otherControlPlanes { + for _, file := range []string{ + // copy over admin config so we can use any control plane to get it later + "/etc/kubernetes/admin.conf", + // copy over certs + "/etc/kubernetes/pki/ca.crt", "/etc/kubernetes/pki/ca.key", + "/etc/kubernetes/pki/front-proxy-ca.crt", "/etc/kubernetes/pki/front-proxy-ca.key", + "/etc/kubernetes/pki/sa.pub", "/etc/kubernetes/pki/sa.key", + // TODO: if we gain external etcd support these will be + // handled differently + "/etc/kubernetes/pki/etcd/ca.crt", "/etc/kubernetes/pki/etcd/ca.key", + } { + if err := nodeutils.CopyNodeToNode(node, otherNode, file); err != nil { + return errors.Wrap(err, "failed to copy admin kubeconfig") + } + } + } + + // if we are only provisioning one node, remove the control plane taint + // https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#master-isolation + if len(allNodes) == 1 { + // TODO: Once kubeadm 1.23 is no longer supported remove the <1.24 handling. + // TODO: Once kubeadm 1.24 is no longer supported remove the <1.25 handling. 
+		// https://github.com/kubernetes-sigs/kind/issues/1699
+		rawVersion, err := nodeutils.KubeVersion(node)
+		if err != nil {
+			return errors.Wrap(err, "failed to get Kubernetes version from node")
+		}
+		kubeVersion, err := version.ParseSemantic(rawVersion)
+		if err != nil {
+			return errors.Wrap(err, "could not parse Kubernetes version")
+		}
+		var taints []string
+		if kubeVersion.LessThan(version.MustParseSemantic("v1.24.0-alpha.1.592+370031cadac624")) {
+			// for versions older than 1.24 prerelease remove only the old taint
+			taints = []string{"node-role.kubernetes.io/master-"}
+		} else if kubeVersion.LessThan(version.MustParseSemantic("v1.25.0-alpha.0.557+84c8afeba39ec9")) {
+			// for versions between 1.24 and 1.25 prerelease remove both the old and new taint
+			taints = []string{"node-role.kubernetes.io/control-plane-", "node-role.kubernetes.io/master-"}
+		} else {
+			// for any newer version only remove the new taint
+			taints = []string{"node-role.kubernetes.io/control-plane-"}
+		}
+		taintArgs := []string{"--kubeconfig=/etc/kubernetes/admin.conf", "taint", "nodes", "--all"}
+		taintArgs = append(taintArgs, taints...)
+
+		if err := node.Command(
+			"kubectl", taintArgs...,
+		).Run(); err != nil {
+			return errors.Wrap(err, "failed to remove control plane taint")
+		}
+	}
+
+	// mark success
+	ctx.Status.End(true)
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin/join.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin/join.go
new file mode 100644
index 000000000..fbd33555d
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin/join.go
@@ -0,0 +1,139 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kubeadmjoin implements the kubeadm join action
+package kubeadmjoin
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/cluster/constants"
+	"sigs.k8s.io/kind/pkg/cluster/nodes"
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+	"sigs.k8s.io/kind/pkg/log"
+
+	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
+)
+
+// Action implements the kubeadm join action, joining the secondary
+// control-plane nodes and worker nodes to the cluster.
+type Action struct{}
+
+// NewAction returns a new action for creating the kubeadm join
+func NewAction() actions.Action {
+	return &Action{}
+}
+
+// Execute runs the action
+func (a *Action) Execute(ctx *actions.ActionContext) error {
+	allNodes, err := ctx.Nodes()
+	if err != nil {
+		return err
+	}
+
+	// join secondary control plane nodes if any
+	secondaryControlPlanes, err := nodeutils.SecondaryControlPlaneNodes(allNodes)
+	if err != nil {
+		return err
+	}
+	if len(secondaryControlPlanes) > 0 {
+		if err := joinSecondaryControlPlanes(ctx, secondaryControlPlanes); err != nil {
+			return err
+		}
+	}
+
+	// then join worker nodes if any
+	workers, err := nodeutils.SelectNodesByRole(allNodes, constants.WorkerNodeRoleValue)
+	if err != nil {
+		return err
+	}
+	if len(workers) > 0 {
+		if err := joinWorkers(ctx, workers); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func joinSecondaryControlPlanes(
+	ctx *actions.ActionContext,
+	secondaryControlPlanes []nodes.Node,
+) error {
+	ctx.Status.Start("Joining more control-plane nodes 🎮")
+	defer ctx.Status.End(false)
+
+	// TODO(bentheelder): it's too bad we can't do this concurrently
+	// (this is not safe currently)
+	for _, node := range secondaryControlPlanes {
+		node := node // capture loop variable
+		if err := runKubeadmJoin(ctx.Logger, node); err != nil {
+			return err
+		}
+	}
+
+	ctx.Status.End(true)
+	return nil
+}
+
+func joinWorkers(
+	ctx *actions.ActionContext,
+	workers []nodes.Node,
+) error {
+	ctx.Status.Start("Joining worker nodes 🚜")
+	defer ctx.Status.End(false)
+
+	// create the workers concurrently
+	fns := []func() error{}
+	for _, node := range workers {
+		node := node // capture loop variable
+		fns = append(fns, func() error {
+			return runKubeadmJoin(ctx.Logger, node)
+		})
+	}
+	if err := errors.UntilErrorConcurrent(fns); err != nil {
+		return err
+	}
+
+	ctx.Status.End(true)
+	return nil
+}
+
+// runKubeadmJoin executes the kubeadm join command
+func runKubeadmJoin(logger log.Logger, node nodes.Node) error {
+	// run kubeadm join
+	// TODO(bentheelder): this should be using the config file
+	cmd := node.Command(
+		"kubeadm", "join",
+		// the join command uses the config file generated in a well known location
+		"--config", "/kind/kubeadm.conf",
+		// skip preflight checks, as these have undesirable side effects
+		// and don't tell us much. requires kubeadm 1.13+
+		"--skip-phases=preflight",
+		// increase verbosity for debugging
+		"--v=6",
+	)
+	lines, err := exec.CombinedOutputLines(cmd)
+	logger.V(3).Info(strings.Join(lines, "\n"))
+	if err != nil {
+		return errors.Wrap(err, "failed to join node with kubeadm")
+	}
+
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer/loadbalancer.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer/loadbalancer.go
new file mode 100644
index 000000000..bcc2fd107
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer/loadbalancer.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package loadbalancer implements the load balancer configuration action
+package loadbalancer
+
+import (
+	"fmt"
+
+	"sigs.k8s.io/kind/pkg/cluster/constants"
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
+	"sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer"
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers/common"
+	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
+)
+
+// Action implements an action for configuring and starting the
+// external load balancer in front of the control-plane nodes.
+type Action struct{}
+
+// NewAction returns a new Action for configuring the load balancer
+func NewAction() actions.Action {
+	return &Action{}
+}
+
+// Execute runs the action
+func (a *Action) Execute(ctx *actions.ActionContext) error {
+	allNodes, err := ctx.Nodes()
+	if err != nil {
+		return err
+	}
+
+	// identify external load balancer node
+	loadBalancerNode, err := nodeutils.ExternalLoadBalancerNode(allNodes)
+	if err != nil {
+		return err
+	}
+
+	// if there's no loadbalancer we're done
+	if loadBalancerNode == nil {
+		return nil
+	}
+
+	// otherwise notify the user
+	ctx.Status.Start("Configuring the external load balancer ⚖️")
+	defer ctx.Status.End(false)
+
+	// collect info about the existing controlplane nodes
+	var backendServers = map[string]string{}
+	controlPlaneNodes, err := nodeutils.SelectNodesByRole(
+		allNodes,
+		constants.ControlPlaneNodeRoleValue,
+	)
+	if err != nil {
+		return err
+	}
+	for _, n := range controlPlaneNodes {
+		backendServers[n.String()] = fmt.Sprintf("%s:%d", n.String(), common.APIServerInternalPort)
+	}
+
+	// create loadbalancer config data
+	loadbalancerConfig, err := loadbalancer.Config(&loadbalancer.ConfigData{
+		ControlPlanePort: common.APIServerInternalPort,
+		BackendServers:   backendServers,
+		IPv6:             ctx.Config.Networking.IPFamily == config.IPv6Family,
+	})
+	if err != nil {
+		return errors.Wrap(err, "failed to generate loadbalancer config data")
+	}
+
+	// create loadbalancer config on the node
+	if err := nodeutils.WriteFile(loadBalancerNode, loadbalancer.ConfigPath, loadbalancerConfig); err != nil {
+		// TODO: logging here
+		return errors.Wrap(err, "failed to copy loadbalancer config to node")
+	}
+
+	// reload the config. haproxy will reload on SIGHUP
+	if err := loadBalancerNode.Command("kill", "-s", "HUP", "1").Run(); err != nil {
+		return errors.Wrap(err, "failed to reload loadbalancer")
+	}
+
+	ctx.Status.End(true)
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready/waitforready.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready/waitforready.go
new file mode 100644
index 000000000..d106178b2
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready/waitforready.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package waitforready implements the wait for ready action
+package waitforready
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
+	"sigs.k8s.io/kind/pkg/cluster/nodes"
+	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+	"sigs.k8s.io/kind/pkg/internal/version"
+)
+
+// Action implements an action for waiting for the cluster to be ready
+type Action struct {
+	waitTime time.Duration
+}
+
+// NewAction returns a new action for waiting for the cluster to be ready
+func NewAction(waitTime time.Duration) actions.Action {
+	return &Action{
+		waitTime: waitTime,
+	}
+}
+
+// Execute runs the action
+func (a *Action) Execute(ctx *actions.ActionContext) error {
+	// skip entirely if the wait time is 0
+	if a.waitTime == time.Duration(0) {
+		return nil
+	}
+	ctx.Status.Start(
+		fmt.Sprintf(
+			"Waiting ≤ %s for control-plane = Ready ⏳",
+			formatDuration(a.waitTime),
+		),
+	)
+
+	allNodes, err := ctx.Nodes()
+	if err != nil {
+		return err
+	}
+	// get a control plane node to use to check cluster status
+	controlPlanes, err := nodeutils.ControlPlaneNodes(allNodes)
+	if err != nil {
+		return err
+	}
+	node := controlPlanes[0] // kind expects at least one always
+
+	// Wait for the nodes to reach Ready status.
+	startTime := time.Now()
+
+	// TODO: Remove the below handling once kubeadm 1.23 is no longer supported.
+	// https://github.com/kubernetes-sigs/kind/issues/1699
+	rawVersion, err := nodeutils.KubeVersion(node)
+	if err != nil {
+		return errors.Wrap(err, "failed to get Kubernetes version from node")
+	}
+	kubeVersion, err := version.ParseSemantic(rawVersion)
+	if err != nil {
+		return errors.Wrap(err, "could not parse Kubernetes version")
+	}
+	selectorLabel := "node-role.kubernetes.io/control-plane"
+	if kubeVersion.LessThan(version.MustParseSemantic("v1.24.0-alpha.1.591+a3d5e5598290df")) {
+		selectorLabel = "node-role.kubernetes.io/master"
+	}
+
+	isReady := waitForReady(node, startTime.Add(a.waitTime), selectorLabel)
+	if !isReady {
+		ctx.Status.End(false)
+		ctx.Logger.V(0).Info(" • WARNING: Timed out waiting for Ready ⚠️")
+		return nil
+	}
+
+	// mark success
+	ctx.Status.End(true)
+	ctx.Logger.V(0).Infof(" • Ready after %s 💚", formatDuration(time.Since(startTime)))
+	return nil
+}
+
+// waitForReady uses kubectl inside the "node" container to check if the
+// control plane nodes are "Ready".
+func waitForReady(node nodes.Node, until time.Time, selectorLabel string) bool {
+	return tryUntil(until, func() bool {
+		cmd := node.Command(
+			"kubectl",
+			"--kubeconfig=/etc/kubernetes/admin.conf",
+			"get",
+			"nodes",
+			"--selector="+selectorLabel,
+			// When the node reaches status ready, the status field will be set
+			// to true.
+			"-o=jsonpath='{.items..status.conditions[-1:].status}'",
+		)
+		lines, err := exec.OutputLines(cmd)
+		if err != nil {
+			return false
+		}
+
+		// 'lines' will return the status of all nodes labeled as master. For
+		// example, if we have three control plane nodes, and all are ready,
+		// then the status will have the following format: 'True True True'.
+		status := strings.Fields(lines[0])
+		for _, s := range status {
+			// Check node status. If node is ready then this will be 'True',
+			// 'False' or 'Unknown' otherwise.
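+			// (note: the jsonpath above selects each node's *last* condition,
+			// which in practice is the Ready condition)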
+			if !strings.Contains(s, "True") {
+				return false
+			}
+		}
+		return true
+	})
+}
+
+// helper that calls `try()` in a loop until the deadline `until`
+// has passed or `try()` returns true, returns whether try ever returned true
+func tryUntil(until time.Time, try func() bool) bool {
+	for until.After(time.Now()) {
+		if try() {
+			return true
+		}
+	}
+	return false
+}
+
+func formatDuration(duration time.Duration) string {
+	return duration.Round(time.Second).String()
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/create.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/create.go
new file mode 100644
index 000000000..351ba6c75
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/create.go
@@ -0,0 +1,257 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package create
+
+import (
+	"fmt"
+	"math/rand"
+	"time"
+
+	"github.com/alessio/shellescape"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/delete"
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers"
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+	"sigs.k8s.io/kind/pkg/internal/apis/config/encoding"
+	"sigs.k8s.io/kind/pkg/internal/cli"
+	"sigs.k8s.io/kind/pkg/log"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
+	configaction "sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config"
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni"
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage"
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit"
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin"
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer"
+	"sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready"
+	"sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig"
+)
+
+const (
+	// Typical host name max limit is 64 characters (https://linux.die.net/man/2/sethostname)
+	// We append -control-plane (14 characters) to the cluster name on the control plane container
+	clusterNameMax = 50
+)
+
+// ClusterOptions holds cluster creation options
+type ClusterOptions struct {
+	Config       *config.Cluster
+	NameOverride string // overrides config.Name
+	// NodeImage overrides the nodes' images in Config if non-zero
+	NodeImage      string
+	Retain         bool
+	WaitForReady   time.Duration
+	KubeconfigPath string
+	// see https://github.com/kubernetes-sigs/kind/issues/324
+	StopBeforeSettingUpKubernetes bool // if false kind should setup kubernetes after creating nodes
+	// Options to control output
+	DisplayUsage      bool
+	DisplaySalutation bool
+}
+
+// Cluster creates a cluster
+func Cluster(logger log.Logger, p providers.Provider, opts *ClusterOptions) error {
+	// validate provider first
+	if err := validateProvider(p); err != nil {
+		return err
+	}
+
+	// default / process options (namely config)
+	if err := fixupOptions(opts); err != nil {
+		return err
+	}
+
+	// Check if the cluster name already exists
+	if err := alreadyExists(p, opts.Config.Name); err != nil {
+		return err
+	}
+
+	// warn if cluster name might typically be too long
+	if len(opts.Config.Name) > clusterNameMax {
+		logger.Warnf("cluster name %q is probably too long, this might not work properly on some systems", opts.Config.Name)
+	}
+
+	// then validate
+	if err := opts.Config.Validate(); err != nil {
+		return err
+	}
+
+	// setup a status object to show progress to the user
+	status := cli.StatusForLogger(logger)
+
+	// we're going to start creating now, tell the user
+	logger.V(0).Infof("Creating cluster %q ...\n", opts.Config.Name)
+
+	// Create node containers implementing defined config Nodes
+	if err := p.Provision(status, opts.Config); err != nil {
+		// In case of errors nodes are deleted (except if retain is explicitly set)
+		if !opts.Retain {
+			_ = delete.Cluster(logger, p, opts.Config.Name, opts.KubeconfigPath)
+		}
+		return err
+	}
+
+	// TODO(bentheelder): make this controllable from the command line?
+	actionsToRun := []actions.Action{
+		loadbalancer.NewAction(), // setup external loadbalancer
+		configaction.NewAction(), // setup kubeadm config
+	}
+	if !opts.StopBeforeSettingUpKubernetes {
+		actionsToRun = append(actionsToRun,
+			kubeadminit.NewAction(opts.Config), // run kubeadm init
+		)
+		// this step might be skipped, but is next after init
+		if !opts.Config.Networking.DisableDefaultCNI {
+			actionsToRun = append(actionsToRun,
+				installcni.NewAction(), // install CNI
+			)
+		}
+		// add remaining steps
+		actionsToRun = append(actionsToRun,
+			installstorage.NewAction(),                // install StorageClass
+			kubeadmjoin.NewAction(),                   // run kubeadm join
+			waitforready.NewAction(opts.WaitForReady), // wait for cluster readiness
+		)
+	}
+
+	// run all actions
+	actionsContext := actions.NewActionContext(logger, status, p, opts.Config)
+	for _, action := range actionsToRun {
+		if err := action.Execute(actionsContext); err != nil {
+			if !opts.Retain {
+				_ = delete.Cluster(logger, p, opts.Config.Name, opts.KubeconfigPath)
+			}
+			return err
+		}
+	}
+
+	// skip the rest if we're not setting up kubernetes
+	if opts.StopBeforeSettingUpKubernetes {
+		return nil
+	}
+
+	// try exporting kubeconfig with backoff for locking failures
+	// TODO: factor out into a public errors API w/ backoff handling?
+	// for now this is easier than coming up with a good API
+	var err error
+	for _, b := range []time.Duration{0, time.Millisecond, time.Millisecond * 50, time.Millisecond * 100} {
+		time.Sleep(b)
+		if err = kubeconfig.Export(p, opts.Config.Name, opts.KubeconfigPath, true); err == nil {
+			break
+		}
+	}
+	if err != nil {
+		return err
+	}
+
+	// optionally display usage
+	if opts.DisplayUsage {
+		logUsage(logger, opts.Config.Name, opts.KubeconfigPath)
+	}
+	// optionally give the user a friendly salutation
+	if opts.DisplaySalutation {
+		logger.V(0).Info("")
+		logSalutation(logger)
+	}
+	return nil
+}
+
+// alreadyExists returns an error if the cluster name already exists
+// or if we had an error checking
+func alreadyExists(p providers.Provider, name string) error {
+	n, err := p.ListNodes(name)
+	if err != nil {
+		return err
+	}
+	if len(n) != 0 {
+		return errors.Errorf("node(s) already exist for a cluster with the name %q", name)
+	}
+	return nil
+}
+
+func logUsage(logger log.Logger, name, explicitKubeconfigPath string) {
+	// construct a sample command for interacting with the cluster
+	kctx := kubeconfig.ContextForCluster(name)
+	sampleCommand := fmt.Sprintf("kubectl cluster-info --context %s", kctx)
+	if explicitKubeconfigPath != "" {
+		// explicit path, include this
+		sampleCommand += " --kubeconfig " + shellescape.Quote(explicitKubeconfigPath)
+	}
+	logger.V(0).Infof(`Set kubectl context to "%s"`, kctx)
+	logger.V(0).Infof("You can now use your cluster with:\n\n" + sampleCommand)
+}
+
+func logSalutation(logger log.Logger) {
+	salutations := []string{
+		"Have a nice day! 👋",
+		"Thanks for using kind! 😊",
+		"Not sure what to do next? 😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/",
+		"Have a question, bug, or feature request? Let us know! https://kind.sigs.k8s.io/#community 🙂",
+	}
+	r := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+	s := salutations[r.Intn(len(salutations))]
+	logger.V(0).Info(s)
+}
+
+func fixupOptions(opts *ClusterOptions) error {
+	// do post processing for options
+	// first ensure we at least have a default cluster config
+	if opts.Config == nil {
+		cfg, err := encoding.Load("")
+		if err != nil {
+			return err
+		}
+		opts.Config = cfg
+	}
+
+	if opts.NameOverride != "" {
+		opts.Config.Name = opts.NameOverride
+	}
+
+	// if NodeImage was set, override the image on all nodes
+	if opts.NodeImage != "" {
+		// Apply image override to all the Nodes defined in Config
+		// TODO(fabrizio pandini): this should be reconsidered when implementing
+		// https://github.com/kubernetes-sigs/kind/issues/133
+		for i := range opts.Config.Nodes {
+			opts.Config.Nodes[i].Image = opts.NodeImage
+		}
+	}
+
+	// default config fields (important for usage as a library, where the config
+	// may be constructed in memory rather than from disk)
+	config.SetDefaultsCluster(opts.Config)
+
+	return nil
+}
+
+func validateProvider(p providers.Provider) error {
+	info, err := p.Info()
+	if err != nil {
+		return err
+	}
+	if info.Rootless {
+		if !info.Cgroup2 {
+			return errors.New("running kind with rootless provider requires cgroup v2, see https://kind.sigs.k8s.io/docs/user/rootless/")
+		}
+		if !info.SupportsMemoryLimit || !info.SupportsPidsLimit || !info.SupportsCPUShares {
+			return errors.New("running kind with rootless provider requires setting systemd property \"Delegate=yes\", see https://kind.sigs.k8s.io/docs/user/rootless/")
+		}
+	}
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/delete/delete.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/delete/delete.go
new file mode 100644
index 000000000..1a5e2d15c
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/delete/delete.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package delete
+
+import (
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/log"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig"
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers"
+)
+
+// Cluster deletes the cluster identified by name
+// explicitKubeconfigPath is --kubeconfig, following the rules from
+// https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands
+func Cluster(logger log.Logger, p providers.Provider, name, explicitKubeconfigPath string) error {
+	n, err := p.ListNodes(name)
+	if err != nil {
+		return errors.Wrap(err, "error listing nodes")
+	}
+
+	kerr := kubeconfig.Remove(name, explicitKubeconfigPath)
+	if kerr != nil {
+		logger.Errorf("failed to update kubeconfig: %v", kerr)
+	}
+
+	if len(n) > 0 {
+		err = p.DeleteNodes(n)
+		if err != nil {
+			return err
+		}
+		logger.V(0).Infof("Deleted nodes: %q", n)
+	}
+
+	if kerr != nil {
+		return kerr
+	}
+
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go
new file mode 100644
index 000000000..6aa175819
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go
@@ -0,0 +1,522 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubeadm
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/google/safetext/yamltemplate"
+
+	"sigs.k8s.io/kind/pkg/errors"
+
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+	"sigs.k8s.io/kind/pkg/internal/version"
+)
+
+// ConfigData is supplied to the kubeadm config template, with values populated
+// by the cluster package
+type ConfigData struct {
+	ClusterName       string
+	KubernetesVersion string
+	// The ControlPlaneEndpoint, that is the address of the external loadbalancer
+	// if defined or the bootstrap node
+	ControlPlaneEndpoint string
+	// The Local API Server port
+	APIBindPort int
+	// The API server external listen IP (which we will port forward)
+	APIServerAddress string
+
+	// this should really be used for the --provider-id flag
+	// ideally cluster config should not depend on the node backend otherwise ...
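+	// (rendered into each node's provider ID as kind://<provider>/<cluster>/<node>
+	// by the config templates below)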
+	NodeProvider string
+
+	// ControlPlane flag specifies the node belongs to the control plane
+	ControlPlane bool
+	// The IP address or comma separated list of IP addresses of the node
+	NodeAddress string
+	// The name for the node (not the address)
+	NodeName string
+
+	// The Token for TLS bootstrap
+	Token string
+
+	// KubeProxyMode defines the kube-proxy mode between iptables or ipvs
+	KubeProxyMode string
+	// The subnet used for pods
+	PodSubnet string
+	// The subnet used for services
+	ServiceSubnet string
+
+	// Kubernetes FeatureGates
+	FeatureGates map[string]bool
+
+	// Kubernetes API Server RuntimeConfig
+	RuntimeConfig map[string]string
+
+	// IPFamily of the cluster, it can be IPv4, IPv6 or DualStack
+	IPFamily config.ClusterIPFamily
+
+	// Labels are the labels, in the format "key1=val1,key2=val2", with which the respective node will be labeled
+	NodeLabels string
+
+	// RootlessProvider is true if kind is running with rootless mode
+	RootlessProvider bool
+
+	// DisableLocalStorageCapacityIsolation is typically set true based on RootlessProvider
+	// based on the Kubernetes version, if true kubelet localStorageCapacityIsolation is set false
+	DisableLocalStorageCapacityIsolation bool
+
+	// DerivedConfigData contains fields computed from the other fields for use
+	// in the config templates and should only be populated by calling Derive()
+	DerivedConfigData
+}
+
+// DerivedConfigData fields are automatically derived by
+// ConfigData.Derive if they are not specified / zero valued
+type DerivedConfigData struct {
+	// AdvertiseAddress is the first address in NodeAddress
+	AdvertiseAddress string
+	// DockerStableTag is automatically derived from KubernetesVersion
+	DockerStableTag string
+	// SortedFeatureGates allows us to iterate FeatureGates deterministically
+	SortedFeatureGates []FeatureGate
+	// FeatureGatesString is of the form `Foo=true,Baz=false`
+	FeatureGatesString string
+	// RuntimeConfigString is of the form `Foo=true,Baz=false`
+	RuntimeConfigString string
+	// KubeadmFeatureGates contains Kubeadm only feature gates
+	KubeadmFeatureGates map[string]bool
+	// IPv4 values take precedence over IPv6 by default, if true set IPv6 default values
+	IPv6 bool
+	// kubelet cgroup driver, based on kubernetes version
+	CgroupDriver string
+}
+
+type FeatureGate struct {
+	Name  string
+	Value bool
+}
+
+// Derive automatically derives DockerStableTag if not specified
+func (c *ConfigData) Derive() {
+	// default cgroup driver
+	// TODO: refactor and move all deriving logic to this method
+	c.CgroupDriver = "systemd"
+
+	// get the first address to use it as the API advertised address
+	c.AdvertiseAddress = strings.Split(c.NodeAddress, ",")[0]
+
+	if c.DockerStableTag == "" {
+		c.DockerStableTag = strings.Replace(c.KubernetesVersion, "+", "_", -1)
+	}
+
+	// get the IP addresses family for defaulting components
+	c.IPv6 = c.IPFamily == config.IPv6Family
+
+	// get sorted list of FeatureGate keys
+	featureGateKeys := make([]string, 0, len(c.FeatureGates))
+	for k := range c.FeatureGates {
+		featureGateKeys = append(featureGateKeys, k)
+	}
+	sort.Strings(featureGateKeys)
+
+	// create a sorted key=value,... string of FeatureGates
+	c.SortedFeatureGates = make([]FeatureGate, 0, len(c.FeatureGates))
+	featureGates := make([]string, 0, len(c.FeatureGates))
+	for _, k := range featureGateKeys {
+		v := c.FeatureGates[k]
+		featureGates = append(featureGates, fmt.Sprintf("%s=%t", k, v))
+		c.SortedFeatureGates = append(c.SortedFeatureGates, FeatureGate{
+			Name:  k,
+			Value: v,
+		})
+	}
+	c.FeatureGatesString = strings.Join(featureGates, ",")
+
+	// create a sorted key=value,... string of RuntimeConfig
+	// first get sorted list of RuntimeConfig keys
+	runtimeConfigKeys := make([]string, 0, len(c.RuntimeConfig))
+	for k := range c.RuntimeConfig {
+		runtimeConfigKeys = append(runtimeConfigKeys, k)
+	}
+	sort.Strings(runtimeConfigKeys)
+	// stringify
+	var runtimeConfig []string
+	for _, k := range runtimeConfigKeys {
+		v := c.RuntimeConfig[k]
+		// TODO: do we need to quote / escape these in the future?
+		// Currently runtime config is in practice booleans, no special characters
+		runtimeConfig = append(runtimeConfig, fmt.Sprintf("%s=%s", k, v))
+	}
+	c.RuntimeConfigString = strings.Join(runtimeConfig, ",")
+}
+
+// See docs for these APIs at:
+// https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#pkg-subdirectories
+// EG:
+// https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1
+
+// ConfigTemplateBetaV2 is the kubeadm config template for API version v1beta2
+const ConfigTemplateBetaV2 = `# config generated by kind
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: ClusterConfiguration
+metadata:
+  name: config
+kubernetesVersion: {{.KubernetesVersion}}
+clusterName: "{{.ClusterName}}"
+{{ if .KubeadmFeatureGates}}featureGates:
+{{ range $key, $value := .KubeadmFeatureGates }}
+  "{{ (StructuralData $key) }}": {{ $value }}
+{{end}}{{end}}
+controlPlaneEndpoint: "{{ .ControlPlaneEndpoint }}"
+# on docker for mac we have to expose the api server via port forward,
+# so we need to ensure the cert is valid for localhost so we can talk
+# to the cluster after rewriting the kubeconfig to point to localhost
+apiServer:
+  certSANs: [localhost, "{{.APIServerAddress}}"]
+  extraArgs:
+    "runtime-config": "{{ .RuntimeConfigString }}"
+{{ if .FeatureGates }}
+    "feature-gates": "{{ .FeatureGatesString }}"
+{{ end}}
+controllerManager:
+  extraArgs:
+{{ if .FeatureGates }}
+    "feature-gates": "{{ .FeatureGatesString }}"
+{{ end }}
+    enable-hostpath-provisioner: "true"
+    # configure ipv6 default addresses for IPv6 clusters
+    {{ if .IPv6 -}}
+    bind-address: "::"
+    {{- end }}
+scheduler:
+  extraArgs:
+{{ if .FeatureGates }}
+    "feature-gates": "{{ .FeatureGatesString }}"
+{{ end }}
+    # configure ipv6 default addresses for IPv6 clusters
+    {{ if .IPv6 -}}
+    bind-address: "::1"
+    {{- end }}
+networking:
+  podSubnet: "{{ .PodSubnet }}"
+  serviceSubnet: "{{ .ServiceSubnet }}"
+---
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: InitConfiguration
+metadata:
+  name: config
+# we use a well-known token for TLS bootstrap
+bootstrapTokens:
+- token: "{{ .Token }}"
+# we use a well-known port for making the API server discoverable inside docker network.
+# from the host machine such port will be accessible via a random local port instead.
+localAPIEndpoint:
+  advertiseAddress: "{{ .AdvertiseAddress }}"
+  bindPort: {{.APIBindPort}}
+nodeRegistration:
+  criSocket: "unix:///run/containerd/containerd.sock"
+  kubeletExtraArgs:
+    node-ip: "{{ .NodeAddress }}"
+    provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}"
+    node-labels: "{{ .NodeLabels }}"
+---
+# no-op entry that exists solely so it can be patched
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: JoinConfiguration
+metadata:
+  name: config
+{{ if .ControlPlane -}}
+controlPlane:
+  localAPIEndpoint:
+    advertiseAddress: "{{ .AdvertiseAddress }}"
+    bindPort: {{.APIBindPort}}
+{{- end }}
+nodeRegistration:
+  criSocket: "unix:///run/containerd/containerd.sock"
+  kubeletExtraArgs:
+    node-ip: "{{ .NodeAddress }}"
+    provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}"
+    node-labels: "{{ .NodeLabels }}"
+discovery:
+  bootstrapToken:
+    apiServerEndpoint: "{{ .ControlPlaneEndpoint }}"
+    token: "{{ .Token }}"
+    unsafeSkipCAVerification: true
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+metadata:
+  name: config
+cgroupDriver: {{ .CgroupDriver }}
+cgroupRoot: /kubelet
+failSwapOn: false
+# configure ipv6 addresses in IPv6 mode
+{{ if .IPv6 -}}
+address: "::"
+healthzBindAddress: "::"
+{{- end }}
+# disable disk resource management by default
+# kubelet will see the host disk that the inner container runtime
+# is ultimately backed by and attempt to recover disk space. we don't want that.
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+{{if .FeatureGates}}featureGates:
+{{ range $index, $gate := .SortedFeatureGates }}
+  "{{ (StructuralData $gate.Name) }}": {{ $gate.Value }}
+{{end}}{{end}}
+{{if ne .KubeProxyMode "None"}}
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metadata:
+  name: config
+mode: "{{ .KubeProxyMode }}"
+{{if .FeatureGates}}featureGates:
+{{ range $index, $gate := .SortedFeatureGates }}
+  "{{ (StructuralData $gate.Name) }}": {{ $gate.Value }}
+{{end}}{{end}}
+iptables:
+  minSyncPeriod: 1s
+conntrack:
+# Skip setting sysctl value "net.netfilter.nf_conntrack_max"
+# It is a global variable that affects other namespaces
+  maxPerCore: 0
+{{if .RootlessProvider}}
+# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
+  tcpEstablishedTimeout: 0s
+# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
+  tcpCloseWaitTimeout: 0s
+{{end}}{{end}}
+`
+
+// ConfigTemplateBetaV3 is the kubeadm config template for API version v1beta3
+const ConfigTemplateBetaV3 = `# config generated by kind
+apiVersion: kubeadm.k8s.io/v1beta3
+kind: ClusterConfiguration
+metadata:
+  name: config
+kubernetesVersion: {{.KubernetesVersion}}
+clusterName: "{{.ClusterName}}"
+{{ if .KubeadmFeatureGates}}featureGates:
+{{ range $key, $value := .KubeadmFeatureGates }}
+  "{{ (StructuralData $key) }}": {{ $value }}
+{{end}}{{end}}
+controlPlaneEndpoint: "{{ .ControlPlaneEndpoint }}"
+# on docker for mac we have to expose the api server via port forward,
+# so we need to ensure the cert is valid for localhost so we can talk
+# to the cluster after rewriting the kubeconfig to point to localhost
+apiServer:
+  certSANs: [localhost, "{{.APIServerAddress}}"]
+  extraArgs:
+    "runtime-config": "{{ .RuntimeConfigString }}"
+{{ if .FeatureGates }}
+    "feature-gates": "{{ .FeatureGatesString }}"
+{{ end}}
+controllerManager:
+  extraArgs:
+{{ if .FeatureGates }}
+    "feature-gates": "{{ .FeatureGatesString }}"
+{{ end }}
+    enable-hostpath-provisioner: "true"
+    # configure ipv6 default addresses for IPv6 clusters
+    {{ if .IPv6 -}}
+    bind-address: "::"
+    {{- end }}
+scheduler:
+  extraArgs:
+{{ if .FeatureGates }}
+    "feature-gates": "{{ .FeatureGatesString }}"
+{{ end }}
+    # configure ipv6 default addresses for IPv6 clusters
+    {{ if .IPv6 -}}
+    bind-address: "::1"
+    {{- end }}
+networking:
+  podSubnet: "{{ .PodSubnet }}"
+  serviceSubnet: "{{ .ServiceSubnet }}"
+---
+apiVersion: kubeadm.k8s.io/v1beta3
+kind: InitConfiguration
+metadata:
+  name: config
+# we use a well-known token for TLS bootstrap
+bootstrapTokens:
+- token: "{{ .Token }}"
+# we use a well-known port for making the API server discoverable inside docker network.
+# from the host machine such port will be accessible via a random local port instead.
+localAPIEndpoint:
+  advertiseAddress: "{{ .AdvertiseAddress }}"
+  bindPort: {{.APIBindPort}}
+nodeRegistration:
+  criSocket: "unix:///run/containerd/containerd.sock"
+  kubeletExtraArgs:
+    node-ip: "{{ .NodeAddress }}"
+    provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}"
+    node-labels: "{{ .NodeLabels }}"
+---
+# no-op entry that exists solely so it can be patched
+apiVersion: kubeadm.k8s.io/v1beta3
+kind: JoinConfiguration
+metadata:
+  name: config
+{{ if .ControlPlane -}}
+controlPlane:
+  localAPIEndpoint:
+    advertiseAddress: "{{ .AdvertiseAddress }}"
+    bindPort: {{.APIBindPort}}
+{{- end }}
+nodeRegistration:
+  criSocket: "unix:///run/containerd/containerd.sock"
+  kubeletExtraArgs:
+    node-ip: "{{ .NodeAddress }}"
+    provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}"
+    node-labels: "{{ .NodeLabels }}"
+discovery:
+  bootstrapToken:
+    apiServerEndpoint: "{{ .ControlPlaneEndpoint }}"
+    token: "{{ .Token }}"
+    unsafeSkipCAVerification: true
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+metadata:
+  name: config
+cgroupDriver: {{ .CgroupDriver }}
+cgroupRoot: /kubelet
+failSwapOn: false
+# configure ipv6 addresses in IPv6 mode
+{{ if .IPv6 -}}
+address: "::"
+healthzBindAddress: "::"
+{{- end }}
+# disable disk resource management by default
+# kubelet will see the host disk that the inner container runtime
+# is ultimately backed by and attempt to recover disk space. we don't want that.
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+{{if .FeatureGates}}featureGates:
+{{ range $index, $gate := .SortedFeatureGates }}
+  "{{ (StructuralData $gate.Name) }}": {{ $gate.Value }}
+{{end}}{{end}}
+{{if .DisableLocalStorageCapacityIsolation}}localStorageCapacityIsolation: false{{end}}
+{{if ne .KubeProxyMode "None"}}
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metadata:
+  name: config
+mode: "{{ .KubeProxyMode }}"
+{{if .FeatureGates}}featureGates:
+{{ range $index, $gate := .SortedFeatureGates }}
+  "{{ (StructuralData $gate.Name) }}": {{ $gate.Value }}
+{{end}}{{end}}
+iptables:
+  minSyncPeriod: 1s
+conntrack:
+# Skip setting sysctl value "net.netfilter.nf_conntrack_max"
+# It is a global variable that affects other namespaces
+  maxPerCore: 0
+{{if .RootlessProvider}}
+# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
+  tcpEstablishedTimeout: 0s
+# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
+  tcpCloseWaitTimeout: 0s
+{{end}}{{end}}
+`
+
+// Config returns a kubeadm config generated from config data, in particular
+// the kubernetes version
+func Config(data ConfigData) (config string, err error) {
+	ver, err := version.ParseGeneric(data.KubernetesVersion)
+	if err != nil {
+		return "", err
+	}
+
+	// ensure featureGates is non-nil, as we may add entries
+	if data.FeatureGates == nil {
+		data.FeatureGates = make(map[string]bool)
+	}
+
+	if data.RootlessProvider {
+		if ver.LessThan(version.MustParseSemantic("v1.22.0")) {
+			// rootless kind v0.12.x supports Kubernetes v1.22 with KubeletInUserNamespace gate.
+			// rootless kind v0.11.x supports older Kubernetes with fake procfs.
+			return "", errors.Errorf("version %q is not compatible with rootless provider (hint: kind v0.11.x may work with this version)", ver)
+		}
+		data.FeatureGates["KubeletInUserNamespace"] = true
+
+		// For avoiding err="failed to get rootfs info: failed to get device for dir \"/var/lib/kubelet\": could not find device with major: 0, minor: 41 in cached partitions map"
+		// https://github.com/kubernetes-sigs/kind/issues/2524
+		if ver.LessThan(version.MustParseSemantic("v1.25.0-alpha.3.440+0064010cddfa00")) {
+			// this feature gate was removed in v1.25 and replaced by an opt-out to disable
+			data.FeatureGates["LocalStorageCapacityIsolation"] = false
+		} else {
+			// added in v1.25 https://github.com/kubernetes/kubernetes/pull/111513
+			data.DisableLocalStorageCapacityIsolation = true
+		}
+	}
+
+	// assume the latest API version, then fallback if the k8s version is too low
+	templateSource := ConfigTemplateBetaV3
+	if ver.LessThan(version.MustParseSemantic("v1.23.0")) {
+		templateSource = ConfigTemplateBetaV2
+	}
+
+	t, err := yamltemplate.New("kubeadm-config").Parse(templateSource)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to parse config template")
+	}
+
+	// derive any automatic fields if not supplied
+	data.Derive()
+
+	// Kubeadm has its own feature-gate for dual stack
+	// we need to enable it for Kubernetes version 1.20 only
+	// dual-stack is only supported in 1.20+
+	// TODO: remove this when 1.20 is EOL or we no longer support
+	// dual-stack for 1.20 in KIND
+	if ver.LessThan(version.MustParseSemantic("v1.21.0")) &&
+		ver.AtLeast(version.MustParseSemantic("v1.20.0")) {
+		data.KubeadmFeatureGates = make(map[string]bool)
+		data.KubeadmFeatureGates["IPv6DualStack"] = true
+	}
+
+	// before 1.24 kind uses cgroupfs
+	// after 1.24 kind uses systemd starting in kind v0.13.0
+	// before kind v0.13.0 kubernetes 1.24 wasn't released yet
+	if ver.LessThan(version.MustParseSemantic("v1.24.0")) {
+		data.CgroupDriver = "cgroupfs"
+	}
+
+	// execute the template
+	var buff bytes.Buffer
+	err = t.Execute(&buff, data)
+	if err != nil {
+		return "", errors.Wrap(err, "error executing config template")
+	}
+	return buff.String(), nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/const.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/const.go
new file mode 100644
index 000000000..f8a0e0df2
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/const.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubeadm
+
+// Token defines a dummy, well known token for automating TLS bootstrap process
+const Token = "abcdef.0123456789abcdef"
+
+// ObjectName is the name every generated object will have
+// i.e. `metadata:\nname: config`
+const ObjectName = "config"
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/doc.go
new file mode 100644
index 000000000..f023db7df
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kubeadm contains kubeadm related constants and configuration
+package kubeadm
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/encode.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/encode.go
new file mode 100644
index 000000000..336212eff
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/encode.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubeconfig
+
+import (
+	"bytes"
+
+	yaml "gopkg.in/yaml.v3"
+	kubeyaml "sigs.k8s.io/yaml"
+
+	"sigs.k8s.io/kind/pkg/errors"
+)
+
+// Encode encodes the cfg to yaml
+func Encode(cfg *Config) ([]byte, error) {
+	// NOTE: kubernetes's yaml library doesn't handle inline fields very well
+	// so we're not using that to marshal
+	encoded, err := yaml.Marshal(cfg)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to encode KUBECONFIG")
+	}
+
+	// normalize with kubernetes's yaml library
+	// this is not strictly necessary, but it ensures minimal diffs when
+	// modifying kubeconfig files, which is nice to have
+	encoded, err = normYaml(encoded)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to normalize KUBECONFIG encoding")
+	}
+
+	return encoded, nil
+}
+
+// normYaml round trips yaml bytes through sigs.k8s.io/yaml to normalize them
+// versus other kubernetes ecosystem yaml output
+func normYaml(y []byte) ([]byte, error) {
+	var unstructured interface{}
+	if err := kubeyaml.Unmarshal(y, &unstructured); err != nil {
+		return nil, err
+	}
+	encoded, err := kubeyaml.Marshal(&unstructured)
+	if err != nil {
+		return nil, err
+	}
+	// special case: don't write anything when empty
+	if bytes.Equal(encoded, []byte("{}\n")) {
+		return []byte{}, nil
+	}
+	return encoded, nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/helpers.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/helpers.go
new file mode 100644
index 000000000..b92393083
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/helpers.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubeconfig
+
+import (
+	"sigs.k8s.io/kind/pkg/errors"
+)
+
+// KINDClusterKey identifies kind clusters in kubeconfig files
+func KINDClusterKey(clusterName string) string {
+	return "kind-" + clusterName
+}
+
+// checkKubeadmExpectations validates that a kubeadm created KUBECONFIG meets
+// our expectations, namely on the number of entries
+func checkKubeadmExpectations(cfg *Config) error {
+	if len(cfg.Clusters) != 1 {
+		return errors.Errorf("kubeadm KUBECONFIG should have one cluster, but read %d", len(cfg.Clusters))
+	}
+	if len(cfg.Users) != 1 {
+		return errors.Errorf("kubeadm KUBECONFIG should have one user, but read %d", len(cfg.Users))
+	}
+	if len(cfg.Contexts) != 1 {
+		return errors.Errorf("kubeadm KUBECONFIG should have one context, but read %d", len(cfg.Contexts))
+	}
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/lock.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/lock.go
new file mode 100644
index 000000000..41d1be27f
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/lock.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubeconfig
+
+import (
+	"os"
+	"path/filepath"
+)
+
+// these are from
+// https://github.com/kubernetes/client-go/blob/611184f7c43ae2d520727f01d49620c7ed33412d/tools/clientcmd/loader.go#L439-L440
+
+func lockFile(filename string) error {
+	// Make sure the dir exists before we try to create a lock file.
+	dir := filepath.Dir(filename)
+	if _, err := os.Stat(dir); os.IsNotExist(err) {
+		if err = os.MkdirAll(dir, 0755); err != nil {
+			return err
+		}
+	}
+	f, err := os.OpenFile(lockName(filename), os.O_CREATE|os.O_EXCL, 0)
+	if err != nil {
+		return err
+	}
+	f.Close()
+	return nil
+}
+
+func unlockFile(filename string) error {
+	return os.Remove(lockName(filename))
+}
+
+func lockName(filename string) string {
+	return filename + ".lock"
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/merge.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/merge.go
new file mode 100644
index 000000000..ec4a2164d
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/merge.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubeconfig
+
+import (
+	"os"
+
+	"sigs.k8s.io/kind/pkg/errors"
+)
+
+// WriteMerged writes a kind kubeconfig (see KINDFromRawKubeadm) into configPath
+// merging with the existing contents if any and setting the current context to
+// the kind config's current context.
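+// The target file is locked in the same way client-go locks it (see lock.go)
+// while the merged config is written back out.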
+func WriteMerged(kindConfig *Config, explicitConfigPath string) error {
+	// figure out what filepath we should use
+	configPath := pathForMerge(explicitConfigPath, os.Getenv)
+
+	// lock config file the same as client-go
+	if err := lockFile(configPath); err != nil {
+		return errors.Wrap(err, "failed to lock config file")
+	}
+	defer func() {
+		_ = unlockFile(configPath)
+	}()
+
+	// read in existing
+	existing, err := read(configPath)
+	if err != nil {
+		return errors.Wrap(err, "failed to get kubeconfig to merge")
+	}
+
+	// merge with kind kubeconfig
+	if err := merge(existing, kindConfig); err != nil {
+		return err
+	}
+
+	// write back out
+	return write(existing, configPath)
+}
+
+// merge kind config into an existing config
+func merge(existing, kind *Config) error {
+	// verify assumptions about kubeadm / kind kubeconfigs
+	if err := checkKubeadmExpectations(kind); err != nil {
+		return err
+	}
+
+	// insert or append cluster entry
+	shouldAppend := true
+	for i := range existing.Clusters {
+		if existing.Clusters[i].Name == kind.Clusters[0].Name {
+			existing.Clusters[i] = kind.Clusters[0]
+			shouldAppend = false
+		}
+	}
+	if shouldAppend {
+		existing.Clusters = append(existing.Clusters, kind.Clusters[0])
+	}
+
+	// insert or append user entry
+	shouldAppend = true
+	for i := range existing.Users {
+		if existing.Users[i].Name == kind.Users[0].Name {
+			existing.Users[i] = kind.Users[0]
+			shouldAppend = false
+		}
+	}
+	if shouldAppend {
+		existing.Users = append(existing.Users, kind.Users[0])
+	}
+
+	// insert or append context entry
+	shouldAppend = true
+	for i := range existing.Contexts {
+		if existing.Contexts[i].Name == kind.Contexts[0].Name {
+			existing.Contexts[i] = kind.Contexts[0]
+			shouldAppend = false
+		}
+	}
+	if shouldAppend {
+		existing.Contexts = append(existing.Contexts, kind.Contexts[0])
+	}
+
+	// set the current context
+	existing.CurrentContext = kind.CurrentContext
+
+	// TODO: We should not need this, but it allows broken clients that depend
+	// on apiVersion and kind to work. Notably the upstream javascript client.
+	// See: https://github.com/kubernetes-sigs/kind/issues/1242
+	if len(existing.OtherFields) == 0 {
+		// TODO: Should we be deep-copying? for now we don't need to
+		// and doing so would be a pain (re and de-serialize maybe?) :shrug:
+		existing.OtherFields = kind.OtherFields
+	}
+
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/paths.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/paths.go
new file mode 100644
index 000000000..ab55792c4
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/paths.go
@@ -0,0 +1,167 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubeconfig
+
+import (
+	"os"
+	"path"
+	"path/filepath"
+	"runtime"
+
+	"sigs.k8s.io/kind/pkg/internal/sets"
+)
+
+const kubeconfigEnv = "KUBECONFIG"
+
+/*
+paths returns the list of paths to be considered for kubeconfig files
+where explicitPath is the value of --kubeconfig
+
+# Logic based on kubectl
+
+https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands
+
+- If the --kubeconfig flag is set, then only that file is loaded. The flag may only be set once and no merging takes place.
+
+- If $KUBECONFIG environment variable is set, then it is used as a list of paths (normal path delimiting rules for your system). These paths are merged. When a value is modified, it is modified in the file that defines the stanza. When a value is created, it is created in the first file that exists.
+
+- If no files in the chain exist, then it creates the last file in the list.
+
+- Otherwise, ${HOME}/.kube/config is used and no merging takes place.
+*/
+func paths(explicitPath string, getEnv func(string) string) []string {
+	if explicitPath != "" {
+		return []string{explicitPath}
+	}
+
+	paths := discardEmptyAndDuplicates(
+		filepath.SplitList(getEnv(kubeconfigEnv)),
+	)
+	if len(paths) != 0 {
+		return paths
+	}
+
+	return []string{path.Join(homeDir(runtime.GOOS, getEnv), ".kube", "config")}
+}
+
+// pathForMerge returns the file that kubectl would merge into
+func pathForMerge(explicitPath string, getEnv func(string) string) string {
+	// find the first file that exists
+	p := paths(explicitPath, getEnv)
+	if len(p) == 1 {
+		return p[0]
+	}
+	for _, filename := range p {
+		if fileExists(filename) {
+			return filename
+		}
+	}
+	// otherwise the last file
+	return p[len(p)-1]
+}
+
+func fileExists(filename string) bool {
+	info, err := os.Stat(filename)
+	if os.IsNotExist(err) {
+		return false
+	}
+	return !info.IsDir()
+}
+
+func discardEmptyAndDuplicates(paths []string) []string {
+	seen := sets.NewString()
+	kept := 0
+	for _, p := range paths {
+		if p != "" && !seen.Has(p) {
+			paths[kept] = p
+			kept++
+			seen.Insert(p)
+		}
+	}
+	return paths[:kept]
+}
+
+// homeDir returns the home directory for the current user.
+// On Windows:
+// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned.
+// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned.
+// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned.
+// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned.
+// NOTE this is from client-go. Rather than pull in client-go for this one
+// standalone method, we have a fork here.
+// https://github.com/kubernetes/client-go/blob/6d7018244d72350e2e8c4a19ccdbe4c8083a9143/util/homedir/homedir.go
+// We've modified this to require injecting os.Getenv and runtime.GOOS as dependencies for testing purposes
+func homeDir(GOOS string, getEnv func(string) string) string {
+	if GOOS == "windows" {
+		home := getEnv("HOME")
+		homeDriveHomePath := ""
+		if homeDrive, homePath := getEnv("HOMEDRIVE"), getEnv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
+			homeDriveHomePath = homeDrive + homePath
+		}
+		userProfile := getEnv("USERPROFILE")
+
+		// Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file.
+ // %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility. + for _, p := range []string{home, homeDriveHomePath, userProfile} { + if len(p) == 0 { + continue + } + if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil { + continue + } + return p + } + + firstSetPath := "" + firstExistingPath := "" + + // Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools + for _, p := range []string{home, userProfile, homeDriveHomePath} { + if len(p) == 0 { + continue + } + if len(firstSetPath) == 0 { + // remember the first path that is set + firstSetPath = p + } + info, err := os.Stat(p) + if err != nil { + continue + } + if len(firstExistingPath) == 0 { + // remember the first path that exists + firstExistingPath = p + } + if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 { + // return first path that is writeable + return p + } + } + + // If none are writeable, return first location that exists + if len(firstExistingPath) > 0 { + return firstExistingPath + } + + // If none exist, return first location that is set + if len(firstSetPath) > 0 { + return firstSetPath + } + + // We've got nothing + return "" + } + return getEnv("HOME") +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/read.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/read.go new file mode 100644 index 000000000..d483d106e --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/read.go @@ -0,0 +1,82 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "io" + "os" + + yaml "gopkg.in/yaml.v3" + + "sigs.k8s.io/kind/pkg/errors" +) + +// KINDFromRawKubeadm returns a kind kubeconfig derived from the raw kubeadm kubeconfig, +// the kind clusterName, and the server. +// server is ignored if unset. 
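+//
+// Editorial sketch (illustrative, not part of the vendored upstream
+// source): for a kubeadm admin.conf and clusterName "demo", every named
+// reference below is rewritten to the key from KINDClusterKey, assumed
+// here to take the usual "kind-<name>" form:
+//
+//	cfg, _ := KINDFromRawKubeadm(raw, "demo", "")
+//	// cfg.Clusters[0].Name == "kind-demo"
+//	// cfg.CurrentContext  == "kind-demo"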
+func KINDFromRawKubeadm(rawKubeadmKubeConfig, clusterName, server string) (*Config, error) { + cfg := &Config{} + if err := yaml.Unmarshal([]byte(rawKubeadmKubeConfig), cfg); err != nil { + return nil, err + } + + // verify assumptions about kubeadm kubeconfigs + if err := checkKubeadmExpectations(cfg); err != nil { + return nil, err + } + + // compute unique kubeconfig key for this cluster + key := KINDClusterKey(clusterName) + + // use the unique key for all named references + cfg.Clusters[0].Name = key + cfg.Users[0].Name = key + cfg.Contexts[0].Name = key + cfg.Contexts[0].Context.User = key + cfg.Contexts[0].Context.Cluster = key + cfg.CurrentContext = key + + // patch server field if server was set + if server != "" { + cfg.Clusters[0].Cluster.Server = server + } + + return cfg, nil +} + +// read loads a KUBECONFIG file from configPath +func read(configPath string) (*Config, error) { + // try to open, return default if no such file + f, err := os.Open(configPath) + if os.IsNotExist(err) { + return &Config{}, nil + } else if err != nil { + return nil, errors.WithStack(err) + } + + // otherwise read in and deserialize + cfg := &Config{} + rawExisting, err := io.ReadAll(f) + if err != nil { + return nil, errors.WithStack(err) + } + if err := yaml.Unmarshal(rawExisting, cfg); err != nil { + return nil, errors.WithStack(err) + } + + return cfg, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/remove.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/remove.go new file mode 100644 index 000000000..e5fed9556 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/remove.go @@ -0,0 +1,111 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubeconfig + +import ( + "os" + + "sigs.k8s.io/kind/pkg/errors" +) + +// RemoveKIND removes the kind cluster kindClusterName from the KUBECONFIG +// files at configPaths +func RemoveKIND(kindClusterName string, explicitPath string) error { + // remove kind from each if present + for _, configPath := range paths(explicitPath, os.Getenv) { + if err := func(configPath string) error { + // lock before modifying + if err := lockFile(configPath); err != nil { + return errors.Wrap(err, "failed to lock config file") + } + defer func(configPath string) { + _ = unlockFile(configPath) + }(configPath) + + // read in existing + existing, err := read(configPath) + if err != nil { + return errors.Wrap(err, "failed to read kubeconfig to remove KIND entry") + } + + // remove the kind cluster from the config + if remove(existing, kindClusterName) { + // write out the updated config if we modified anything + if err := write(existing, configPath); err != nil { + return err + } + } + + return nil + }(configPath); err != nil { + return err + } + } + return nil +} + +// remove drops kindClusterName entries from the cfg +func remove(cfg *Config, kindClusterName string) bool { + mutated := false + + // get kind cluster identifier + key := KINDClusterKey(kindClusterName) + + // filter out kind cluster from clusters + kept := 0 + for _, c := range cfg.Clusters { + if c.Name != key { + cfg.Clusters[kept] = c + kept++ + } else { + mutated = true + } + } + cfg.Clusters = cfg.Clusters[:kept] + + // filter out kind cluster from users + kept = 0 + for _, u := range cfg.Users { + if u.Name != key { + cfg.Users[kept] = u + kept++ + } else { + mutated = true + } + } + cfg.Users = cfg.Users[:kept] + + // filter out kind cluster from contexts + kept = 0 + for _, c := range cfg.Contexts { + if c.Name != key { + cfg.Contexts[kept] = c + kept++ + } else { + mutated = true + } + } + cfg.Contexts = cfg.Contexts[:kept] + + // unset current context if it points to this cluster + if cfg.CurrentContext == key { + cfg.CurrentContext = "" + mutated = true + } + + return mutated +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/types.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/types.go new file mode 100644 index 000000000..1da5df545 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/types.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubeconfig + +/* +NOTE: all of these types are based on the upstream v1 types from client-go +https://github.com/kubernetes/client-go/blob/0bdba2f9188006fc64057c2f6d82a0f9ee0ee422/tools/clientcmd/api/v1/types.go + +We've forked them to: +- remove types and fields kind does not need to inspect / modify +- generically support fields kind doesn't inspect / modify using yaml.v3 +- have clearer names (AuthInfo -> User) +*/ + +// Config represents a KUBECONFIG, with the fields kind is likely to use +// Other fields are handled as unstructured data purely read for writing back +// to disk via the OtherFields field +type Config struct { + // Clusters is a map of referenceable names to cluster configs + Clusters []NamedCluster `yaml:"clusters,omitempty"` + // Users is a map of referenceable names to user configs + Users []NamedUser `yaml:"users,omitempty"` + // Contexts is a map of referenceable names to context configs + Contexts []NamedContext `yaml:"contexts,omitempty"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `yaml:"current-context,omitempty"` + // OtherFields contains fields kind does not inspect or modify, these are + // read purely for writing back + OtherFields map[string]interface{} `yaml:",inline,omitempty"` +} + +// NamedCluster relates nicknames to cluster information +type NamedCluster struct { + // Name is the nickname for this Cluster + Name string `yaml:"name"` + // Cluster holds the cluster information + Cluster Cluster `yaml:"cluster"` +} + +// Cluster contains information about how to communicate with a kubernetes cluster +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `yaml:"server,omitempty"` + // OtherFields contains fields kind does not inspect or modify, these are + // read purely for writing back + OtherFields map[string]interface{} `yaml:",inline,omitempty"` +} + +// NamedUser relates nicknames to user information +type NamedUser struct { + // Name is the nickname for this User + Name string `yaml:"name"` + // User holds the user information + // We do not touch this and merely write it back + User map[string]interface{} `yaml:"user"` +} + +// NamedContext relates nicknames to context information +type NamedContext struct { + // Name is the nickname for this Context + Name string `yaml:"name"` + // Context holds the context information + Context Context `yaml:"context"` +} + +// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) +type Context struct { + // Cluster is the name of the cluster for this context + Cluster string `yaml:"cluster"` + // User is the name of the User for this context + User string `yaml:"user"` + // OtherFields contains fields kind does not inspect or modify, these are + // read purely for writing back + OtherFields map[string]interface{} `yaml:",inline,omitempty"` +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/write.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/write.go new file mode 100644 index 000000000..1e8de4ecb --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/write.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "os" + "path/filepath" + + "sigs.k8s.io/kind/pkg/errors" +) + +// write writes cfg to configPath +// it will ensure the directories in the path if necessary +func write(cfg *Config, configPath string) error { + encoded, err := Encode(cfg) + if err != nil { + return err + } + // NOTE: 0755 / 0600 are to match client-go + dir := filepath.Dir(configPath) + if _, err := os.Stat(dir); os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return errors.Wrap(err, "failed to create directory for KUBECONFIG") + } + } + if err := os.WriteFile(configPath, encoded, 0600); err != nil { + return errors.Wrap(err, "failed to write KUBECONFIG") + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/kubeconfig.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/kubeconfig.go new file mode 100644 index 000000000..c5e4c2ace --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/kubeconfig.go @@ -0,0 +1,105 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kubeconfig provides utilities kind uses internally to manage +// kind cluster kubeconfigs +package kubeconfig + +import ( + "bytes" + + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/errors" + + // this package has slightly more generic kubeconfig helpers + // and minimal dependencies on the rest of kind + "sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig" + "sigs.k8s.io/kind/pkg/cluster/internal/providers" +) + +// Export exports the kubeconfig given the cluster context and a path to write it to +// This will always be an external kubeconfig +func Export(p providers.Provider, name, explicitPath string, external bool) error { + cfg, err := get(p, name, external) + if err != nil { + return err + } + return kubeconfig.WriteMerged(cfg, explicitPath) +} + +// Remove removes clusterName from the kubeconfig paths detected based on +// either explicitPath being set or $KUBECONFIG or $HOME/.kube/config, following +// the rules set by kubectl +// clusterName must identify a kind cluster. 
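+//
+// Illustrative usage (editorial sketch, not upstream code), assuming the
+// kubectl-style path resolution described above:
+//
+//	_ = Remove("demo", "/tmp/kubeconfig") // only /tmp/kubeconfig is edited
+//	_ = Remove("demo", "")                // each file in $KUBECONFIG, or
+//	                                      // $HOME/.kube/config as fallback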
+func Remove(clusterName, explicitPath string) error { + return kubeconfig.RemoveKIND(clusterName, explicitPath) +} + +// Get returns the kubeconfig for the cluster +// external controls if the internal IP address is used or the host endpoint +func Get(p providers.Provider, name string, external bool) (string, error) { + cfg, err := get(p, name, external) + if err != nil { + return "", err + } + b, err := kubeconfig.Encode(cfg) + if err != nil { + return "", err + } + return string(b), err +} + +// ContextForCluster returns the context name for a kind cluster based on +// its name. This key is used for all list entries of kind clusters +func ContextForCluster(kindClusterName string) string { + return kubeconfig.KINDClusterKey(kindClusterName) +} + +func get(p providers.Provider, name string, external bool) (*kubeconfig.Config, error) { + // find a control plane node to get the kubeadm config from + n, err := p.ListNodes(name) + if err != nil { + return nil, err + } + var buff bytes.Buffer + nodes, err := nodeutils.ControlPlaneNodes(n) + if err != nil { + return nil, err + } + if len(nodes) < 1 { + return nil, errors.Errorf("could not locate any control plane nodes for cluster named '%s'. "+ + "Use the --name option to select a different cluster", name) + } + node := nodes[0] + + // grab kubeconfig version from the node + if err := node.Command("cat", "/etc/kubernetes/admin.conf").SetStdout(&buff).Run(); err != nil { + return nil, errors.Wrap(err, "failed to get cluster internal kubeconfig") + } + + // if we're doing external we need to override the server endpoint + server := "" + if external { + endpoint, err := p.GetAPIServerEndpoint(name) + if err != nil { + return nil, err + } + server = "https://" + endpoint + } + + // actually encode + return kubeconfig.KINDFromRawKubeadm(buff.String(), name, server) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/config.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/config.go new file mode 100644 index 000000000..185565f66 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/config.go @@ -0,0 +1,85 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package loadbalancer + +import ( + "bytes" + "text/template" + + "sigs.k8s.io/kind/pkg/errors" +) + +// ConfigData is supplied to the loadbalancer config template +type ConfigData struct { + ControlPlanePort int + BackendServers map[string]string + IPv6 bool +} + +// DefaultConfigTemplate is the loadbalancer config template +const DefaultConfigTemplate = `# generated by kind +global + log /dev/log local0 + log /dev/log local1 notice + daemon + # limit memory usage to approximately 18 MB + maxconn 100000 + +resolvers docker + nameserver dns 127.0.0.11:53 + +defaults + log global + mode tcp + option dontlognull + # TODO: tune these + timeout connect 5000 + timeout client 50000 + timeout server 50000 + # allow to boot despite dns don't resolve backends + default-server init-addr none + +frontend control-plane + bind *:{{ .ControlPlanePort }} + {{ if .IPv6 -}} + bind :::{{ .ControlPlanePort }}; + {{- end }} + default_backend kube-apiservers + +backend kube-apiservers + option httpchk GET /healthz + # TODO: we should be verifying (!) + {{range $server, $address := .BackendServers}} + server {{ $server }} {{ $address }} check check-ssl verify none resolvers docker resolve-prefer {{ if $.IPv6 -}} ipv6 {{- else -}} ipv4 {{- end }} + {{- end}} +` + +// Config returns a kubeadm config generated from config data, in particular +// the kubernetes version +func Config(data *ConfigData) (config string, err error) { + t, err := template.New("loadbalancer-config").Parse(DefaultConfigTemplate) + if err != nil { + return "", errors.Wrap(err, "failed to parse config template") + } + // execute the template + var buff bytes.Buffer + err = t.Execute(&buff, data) + if err != nil { + return "", errors.Wrap(err, "error executing config template") + } + return buff.String(), nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/const.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/const.go new file mode 100644 index 000000000..3600b338b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/const.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loadbalancer + +// Image defines the loadbalancer image:tag +const Image = "docker.io/kindest/haproxy:v20230606-42a2262b" + +// ConfigPath defines the path to the config file in the image +const ConfigPath = "/usr/local/etc/haproxy/haproxy.cfg" diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/doc.go new file mode 100644 index 000000000..6e53f388d --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package loadbalancer contains external loadbalancer related constants and configuration +package loadbalancer diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/doc.go new file mode 100644 index 000000000..000960370 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package logs contains tooling for obtaining cluster logs +package logs diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/logs.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/logs.go new file mode 100644 index 000000000..2dc5941ef --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/logs.go @@ -0,0 +1,105 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path" + "path/filepath" + + "github.com/alessio/shellescape" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/log" +) + +// DumpDir dumps the dir nodeDir on the node to the dir hostDir on the host +func DumpDir(logger log.Logger, node nodes.Node, nodeDir, hostDir string) (err error) { + cmd := node.Command( + "sh", "-c", + // Tar will exit 1 if a file changed during the archival. + // We don't care about this, so we're invoking it in a shell + // And masking out 1 as a return value. + // Fatal errors will return exit code 2. + // http://man7.org/linux/man-pages/man1/tar.1.html#RETURN_VALUE + fmt.Sprintf( + `tar --hard-dereference -C %s -chf - . || (r=$?; [ $r -eq 1 ] || exit $r)`, + shellescape.Quote(path.Clean(nodeDir)+"/"), + ), + ) + + return exec.RunWithStdoutReader(cmd, func(outReader io.Reader) error { + if err := untar(logger, outReader, hostDir); err != nil { + return errors.Wrapf(err, "Untarring %q: %v", nodeDir, err) + } + return nil + }) +} + +// untar reads the tar file from r and writes it into dir. 
+func untar(logger log.Logger, r io.Reader, dir string) (err error) { + tr := tar.NewReader(r) + for { + f, err := tr.Next() + + switch { + case err == io.EOF: + // drain the reader, which may have trailing null bytes + // we don't want to leave the writer hanging + _, err := io.Copy(io.Discard, r) + return err + case err != nil: + return errors.Wrapf(err, "tar reading error: %v", err) + case f == nil: + continue + } + + rel := filepath.FromSlash(f.Name) + abs := filepath.Join(dir, rel) + + switch f.Typeflag { + case tar.TypeReg: + wf, err := os.OpenFile(abs, os.O_CREATE|os.O_RDWR, os.FileMode(f.Mode)) + if err != nil { + return err + } + n, err := io.Copy(wf, tr) + if closeErr := wf.Close(); closeErr != nil && err == nil { + err = closeErr + } + if err != nil { + return errors.Errorf("error writing to %s: %v", abs, err) + } + if n != f.Size { + return errors.Errorf("only wrote %d bytes to %s; expected %d", n, abs, f.Size) + } + case tar.TypeDir: + if _, err := os.Stat(abs); err != nil { + if err := os.MkdirAll(abs, 0755); err != nil { + return err + } + } + default: + logger.Warnf("tar file entry %s contained unsupported file type %v", f.Name, f.Typeflag) + } + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/cgroups.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/cgroups.go new file mode 100644 index 000000000..9ef3abd04 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/cgroups.go @@ -0,0 +1,85 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "bufio" + "context" + "os" + "regexp" + "sync" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +var nodeReachedCgroupsReadyRegexp *regexp.Regexp +var nodeReachedCgroupsReadyRegexpCompileOnce sync.Once + +// NodeReachedCgroupsReadyRegexp returns a regexp for use with WaitUntilLogRegexpMatches +// +// This is used to avoid "ERROR: this script needs /sys/fs/cgroup/cgroup.procs to be empty (for writing the top-level cgroup.subtree_control)" +// See https://github.com/kubernetes-sigs/kind/issues/2409 +// +// This pattern matches either "detected cgroupv1" from the kind node image's entrypoint logs +// or "Multi-User System" target if is using cgroups v2, +// so that `docker exec` can be executed safely without breaking cgroup v2 hierarchy. +func NodeReachedCgroupsReadyRegexp() *regexp.Regexp { + nodeReachedCgroupsReadyRegexpCompileOnce.Do(func() { + // This is an approximation, see: https://github.com/kubernetes-sigs/kind/pull/2421 + nodeReachedCgroupsReadyRegexp = regexp.MustCompile("Reached target .*Multi-User System.*|detected cgroup v1") + }) + return nodeReachedCgroupsReadyRegexp +} + +// WaitUntilLogRegexpMatches waits until logCmd output produces a line matching re. 
+// It will use logCtx to determine if the logCmd deadline was exceeded for producing
+// the most useful error message in failure cases, logCtx should be the context
+// supplied to create logCmd with CommandContext
+func WaitUntilLogRegexpMatches(logCtx context.Context, logCmd exec.Cmd, re *regexp.Regexp) error {
+	pr, pw, err := os.Pipe()
+	if err != nil {
+		return err
+	}
+	logCmd.SetStdout(pw)
+	logCmd.SetStderr(pw)
+
+	defer pr.Close()
+	cmdErrC := make(chan error, 1)
+	go func() {
+		defer pw.Close()
+		cmdErrC <- logCmd.Run()
+	}()
+
+	sc := bufio.NewScanner(pr)
+	for sc.Scan() {
+		line := sc.Text()
+		if re.MatchString(line) {
+			return nil
+		}
+	}
+
+	// when we timeout the process will have been killed due to the timeout, which is not interesting
+	// in other cases if the command errored this may be a useful error
+	if ctxErr := logCtx.Err(); ctxErr != context.DeadlineExceeded {
+		if cmdErr := <-cmdErrC; cmdErr != nil {
+			return errors.Wrap(cmdErr, "failed to read logs")
+		}
+	}
+	// otherwise generic error
+	return errors.Errorf("could not find a log line that matches %q", re.String())
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/constants.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/constants.go
new file mode 100644
index 000000000..131218f19
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/constants.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// APIServerInternalPort defines the port where the control plane is listening
+// _inside_ the node network
+const APIServerInternalPort = 6443
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/doc.go
new file mode 100644
index 000000000..dd4311626
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package common contains common code for implementing providers
+package common
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/getport.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/getport.go
new file mode 100644
index 000000000..5c50da94c
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/getport.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"net"
+)
+
+// PortOrGetFreePort is a helper that either returns the provided port
+// if valid or returns a new free port on listenAddr
+func PortOrGetFreePort(port int32, listenAddr string) (int32, error) {
+	// in the case of -1 we actually want to pass 0 to the backend to let it pick
+	if port == -1 {
+		return 0, nil
+	}
+	// in the case of 0 (unset) we want kind to pick one and supply it to the backend
+	if port == 0 {
+		return GetFreePort(listenAddr)
+	}
+	// otherwise keep the port
+	return port, nil
+}
+
+// GetFreePort is a helper used to get a free TCP port on the host
+func GetFreePort(listenAddr string) (int32, error) {
+	dummyListener, err := net.Listen("tcp", net.JoinHostPort(listenAddr, "0"))
+	if err != nil {
+		return 0, err
+	}
+	defer dummyListener.Close()
+	port := dummyListener.Addr().(*net.TCPAddr).Port
+	return int32(port), nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/images.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/images.go
new file mode 100644
index 000000000..080a337f5
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/images.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+	"sigs.k8s.io/kind/pkg/internal/sets"
+)
+
+// RequiredNodeImages returns the set of _node_ images specified by the config
+// This does not include the loadbalancer image, and is only used to improve
+// the UX by explicitly pulling the node images prior to running
+func RequiredNodeImages(cfg *config.Cluster) sets.String {
+	images := sets.NewString()
+	for _, node := range cfg.Nodes {
+		images.Insert(node.Image)
+	}
+	return images
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/logs.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/logs.go
new file mode 100644
index 000000000..ab2b4c376
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/logs.go
@@ -0,0 +1,59 @@
+package common
+
+import (
+	"os"
+	"path/filepath"
+
+	"sigs.k8s.io/kind/pkg/cluster/nodes"
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+)
+
+// CollectLogs provides the common functionality
+// to get various debug info from the node
+func CollectLogs(n nodes.Node, dir string) error {
+	execToPathFn := func(cmd exec.Cmd, path string) func() error {
+		return func() error {
+			f, err := FileOnHost(filepath.Join(dir, path))
+			if err != nil {
+				return err
+			}
+			defer f.Close()
+			return cmd.SetStdout(f).SetStderr(f).Run()
+		}
+	}
+
+	return errors.AggregateConcurrent([]func() error{
+		// record info about the node container
+		execToPathFn(
+			n.Command("cat", "/kind/version"),
+			"kubernetes-version.txt",
+		),
+		execToPathFn(
+			n.Command("journalctl", "--no-pager"),
+			"journal.log",
+		),
+		execToPathFn(
+			n.Command("journalctl", "--no-pager", "-u", "kubelet.service"),
+			"kubelet.log",
+		),
+		execToPathFn(
+			n.Command("journalctl", "--no-pager", "-u", "containerd.service"),
+			"containerd.log",
+		),
+		execToPathFn(
+			n.Command("crictl", "images"),
+			"images.log",
+		),
+	})
+}
+
+// FileOnHost is a helper to create a file at path
+// even if the parent directory doesn't exist
+// in which case it will be created with ModePerm
+func FileOnHost(path string) (*os.File, error) {
+	if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
+		return nil, err
+	}
+	return os.Create(path)
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/namer.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/namer.go
new file mode 100644
index 000000000..9abade8ce
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/namer.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package common + +import ( + "fmt" +) + +// MakeNodeNamer returns a func(role string)(nodeName string) +// used to name nodes based on their role and the clusterName +func MakeNodeNamer(clusterName string) func(string) string { + counter := make(map[string]int) + return func(role string) string { + count := 1 + suffix := "" + if v, ok := counter[role]; ok { + count += v + suffix = fmt.Sprintf("%d", count) + } + counter[role] = count + return fmt.Sprintf("%s-%s%s", clusterName, role, suffix) + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/proxy.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/proxy.go new file mode 100644 index 000000000..4e896b6e1 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/proxy.go @@ -0,0 +1,64 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "os" + "strings" + + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +const ( + // HTTPProxy is the HTTP_PROXY environment variable key + HTTPProxy = "HTTP_PROXY" + // HTTPSProxy is the HTTPS_PROXY environment variable key + HTTPSProxy = "HTTPS_PROXY" + // NOProxy is the NO_PROXY environment variable key + NOProxy = "NO_PROXY" +) + +// GetProxyEnvs returns a map of proxy environment variables to their values +// If proxy settings are set, NO_PROXY is modified to include the cluster subnets +func GetProxyEnvs(cfg *config.Cluster) map[string]string { + return getProxyEnvs(cfg, os.Getenv) +} + +func getProxyEnvs(cfg *config.Cluster, getEnv func(string) string) map[string]string { + envs := make(map[string]string) + for _, name := range []string{HTTPProxy, HTTPSProxy, NOProxy} { + val := getEnv(name) + if val == "" { + val = getEnv(strings.ToLower(name)) + } + if val != "" { + envs[name] = val + envs[strings.ToLower(name)] = val + } + } + // Specifically add the cluster subnets to NO_PROXY if we are using a proxy + if len(envs) > 0 { + noProxy := envs[NOProxy] + if noProxy != "" { + noProxy += "," + } + noProxy += cfg.Networking.ServiceSubnet + "," + cfg.Networking.PodSubnet + envs[NOProxy] = noProxy + envs[strings.ToLower(NOProxy)] = noProxy + } + return envs +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/OWNERS b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/OWNERS new file mode 100644 index 000000000..33e190320 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/OWNERS @@ -0,0 +1,2 @@ +labels: +- area/provider/docker diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/constants.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/constants.go new file mode 100644 index 000000000..40fd79d9d --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/constants.go @@ -0,0 +1,24 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package docker
+
+// clusterLabelKey is applied to each "node" docker container for identification
+const clusterLabelKey = "io.x-k8s.kind.cluster"
+
+// nodeRoleLabelKey is applied to each "node" docker container for categorization
+// of nodes by role
+const nodeRoleLabelKey = "io.x-k8s.kind.role"
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/images.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/images.go
new file mode 100644
index 000000000..8ad37d073
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/images.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package docker
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+	"sigs.k8s.io/kind/pkg/log"
+
+	"sigs.k8s.io/kind/pkg/cluster/internal/providers/common"
+	"sigs.k8s.io/kind/pkg/internal/apis/config"
+	"sigs.k8s.io/kind/pkg/internal/cli"
+)
+
+// ensureNodeImages ensures that the node images used by the create
+// configuration are present
+func ensureNodeImages(logger log.Logger, status *cli.Status, cfg *config.Cluster) error {
+	// pull each required image
+	for _, image := range common.RequiredNodeImages(cfg).List() {
+		// prints user friendly message
+		friendlyImageName, image := sanitizeImage(image)
+		status.Start(fmt.Sprintf("Ensuring node image (%s) 🖼", friendlyImageName))
+		if _, err := pullIfNotPresent(logger, image, 4); err != nil {
+			status.End(false)
+			return err
+		}
+	}
+	return nil
+}
+
+// pullIfNotPresent will pull an image if it is not present locally
+// retrying up to retries times
+// it returns true if it attempted to pull, and any errors from pulling
+func pullIfNotPresent(logger log.Logger, image string, retries int) (pulled bool, err error) {
+	// TODO(bentheelder): switch most (all) of the logging here to debug level
+	// once we have configurable log levels
+	// if this did not return an error, then the image exists locally
+	cmd := exec.Command("docker", "inspect", "--type=image", image)
+	if err := cmd.Run(); err == nil {
+		logger.V(1).Infof("Image: %s present locally", image)
+		return false, nil
+	}
+	// otherwise try to pull it
+	return true, pull(logger, image, retries)
+}
+
+// pull pulls an image, retrying up to retries times
+func pull(logger log.Logger, image string, retries int) error {
+	logger.V(1).Infof("Pulling image: %s ...", image)
+	err := exec.Command("docker", "pull", image).Run()
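+	// Editorial note on the retry loop below (not upstream commentary):
+	// each attempt sleeps i+1 seconds first, a simple linear backoff, so
+	// with retries=4 (as used by ensureNodeImages) the worst case waits
+	// 1+2+3+4 = 10 seconds before returning the wrapped error.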
+ // retry pulling up to retries times if necessary + if err != nil { + for i := 0; i < retries; i++ { + time.Sleep(time.Second * time.Duration(i+1)) + logger.V(1).Infof("Trying again to pull image: %q ... %v", image, err) + // TODO(bentheelder): add some backoff / sleep? + err = exec.Command("docker", "pull", image).Run() + if err == nil { + break + } + } + } + return errors.Wrapf(err, "failed to pull image %q", image) +} + +// sanitizeImage is a helper to return human readable image name and +// the docker pullable image name from the provided image +func sanitizeImage(image string) (string, string) { + if strings.Contains(image, "@sha256:") { + return strings.Split(image, "@sha256:")[0], image + } + return image, image +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/network.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/network.go new file mode 100644 index 000000000..f43284610 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/network.go @@ -0,0 +1,329 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package docker + +import ( + "bytes" + "crypto/sha1" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "net" + "regexp" + "sort" + "strconv" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// This may be overridden by KIND_EXPERIMENTAL_DOCKER_NETWORK env, +// experimentally... +// +// By default currently picking a single network is equivalent to the previous +// behavior *except* that we moved from the default bridge to a user defined +// network because the default bridge is actually special versus any other +// docker network and lacks the embedded DNS +// +// For now this also makes it easier for apps to join the same network, and +// leaves users with complex networking desires to create and manage their own +// networks. +const fixedNetworkName = "kind" + +// ensureNetwork checks if docker network by name exists, if not it creates it +func ensureNetwork(name string) error { + // check if network exists already and remove any duplicate networks + exists, err := removeDuplicateNetworks(name) + if err != nil { + return err + } + + // network already exists, we're good + // TODO: the network might already exist and not have ipv6 ... :| + // discussion: https://github.com/kubernetes-sigs/kind/pull/1508#discussion_r414594198 + if exists { + return nil + } + + // Generate unique subnet per network based on the name + // obtained from the ULA fc00::/8 range + // Use the MTU configured for the docker default network + // Make N attempts with "probing" in case we happen to collide + subnet := generateULASubnetFromName(name, 0) + mtu := getDefaultNetworkMTU() + err = createNetworkNoDuplicates(name, subnet, mtu) + if err == nil { + // Success! 
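+		// (Editorial note: the first attempt above used a /64 subnet
+		// derived deterministically from hashing the network name into
+		// the ULA fc00::/8 space; see generateULASubnetFromName below.
+		// Subsequent attempts vary only the attempt counter.)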
+ return nil + } + + // On the first try check if ipv6 fails entirely on this machine + // https://github.com/kubernetes-sigs/kind/issues/1544 + // Otherwise if it's not a pool overlap error, fail + // If it is, make more attempts below + if isIPv6UnavailableError(err) { + // only one attempt, IPAM is automatic in ipv4 only + return createNetworkNoDuplicates(name, "", mtu) + } + if isPoolOverlapError(err) { + // pool overlap suggests perhaps another process created the network + // check if network exists already and remove any duplicate networks + exists, err := checkIfNetworkExists(name) + if err != nil { + return err + } + if exists { + return nil + } + // otherwise we'll start trying with different subnets + } else { + // unknown error ... + return err + } + + // keep trying for ipv6 subnets + const maxAttempts = 5 + for attempt := int32(1); attempt < maxAttempts; attempt++ { + subnet := generateULASubnetFromName(name, attempt) + err = createNetworkNoDuplicates(name, subnet, mtu) + if err == nil { + // success! + return nil + } + if isPoolOverlapError(err) { + // pool overlap suggests perhaps another process created the network + // check if network exists already and remove any duplicate networks + exists, err := checkIfNetworkExists(name) + if err != nil { + return err + } + if exists { + return nil + } + // otherwise we'll try again + continue + } + // unknown error ... + return err + } + return errors.New("exhausted attempts trying to find a non-overlapping subnet") +} + +func createNetworkNoDuplicates(name, ipv6Subnet string, mtu int) error { + if err := createNetwork(name, ipv6Subnet, mtu); err != nil && !isNetworkAlreadyExistsError(err) { + return err + } + _, err := removeDuplicateNetworks(name) + return err +} + +func removeDuplicateNetworks(name string) (bool, error) { + networks, err := sortedNetworksWithName(name) + if err != nil { + return false, err + } + if len(networks) > 1 { + if err := deleteNetworks(networks[1:]...); err != nil && !isOnlyErrorNoSuchNetwork(err) { + return false, err + } + } + return len(networks) > 0, nil +} + +func createNetwork(name, ipv6Subnet string, mtu int) error { + args := []string{"network", "create", "-d=bridge", + "-o", "com.docker.network.bridge.enable_ip_masquerade=true", + } + if mtu > 0 { + args = append(args, "-o", fmt.Sprintf("com.docker.network.driver.mtu=%d", mtu)) + } + if ipv6Subnet != "" { + args = append(args, "--ipv6", "--subnet", ipv6Subnet) + } + args = append(args, name) + return exec.Command("docker", args...).Run() +} + +// getDefaultNetworkMTU obtains the MTU from the docker default network +func getDefaultNetworkMTU() int { + cmd := exec.Command("docker", "network", "inspect", "bridge", + "-f", `{{ index .Options "com.docker.network.driver.mtu" }}`) + lines, err := exec.OutputLines(cmd) + if err != nil || len(lines) != 1 { + return 0 + } + mtu, err := strconv.Atoi(lines[0]) + if err != nil { + return 0 + } + return mtu +} + +func sortedNetworksWithName(name string) ([]string, error) { + // query which networks exist with the name + ids, err := networksWithName(name) + if err != nil { + return nil, err + } + // we can skip sorting if there are less than 2 + if len(ids) < 2 { + return ids, nil + } + // inspect them to get more detail for sorting + networks, err := inspectNetworks(ids) + if err != nil { + return nil, err + } + // deterministically sort networks + // NOTE: THIS PART IS IMPORTANT! 
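+	// Editorial illustration (hypothetical IDs, not upstream code): for
+	// two duplicate networks that both have zero containers, e.g. IDs
+	// "aaa" and "bbb", the comparison falls back to ID order, so "aaa"
+	// sorts first and "bbb" becomes the deletion candidate in
+	// removeDuplicateNetworks.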
+ sortNetworkInspectEntries(networks) + // return network IDs + sortedIDs := make([]string, 0, len(networks)) + for i := range networks { + sortedIDs = append(sortedIDs, networks[i].ID) + } + return sortedIDs, nil +} + +func sortNetworkInspectEntries(networks []networkInspectEntry) { + sort.Slice(networks, func(i, j int) bool { + // we want networks with active containers first + if len(networks[i].Containers) > len(networks[j].Containers) { + return true + } + return networks[i].ID < networks[j].ID + }) +} + +func inspectNetworks(networkIDs []string) ([]networkInspectEntry, error) { + inspectOut, err := exec.Output(exec.Command("docker", append([]string{"network", "inspect"}, networkIDs...)...)) + // NOTE: the caller can detect if the network isn't present in the output anyhow + // we don't want to fail on this here. + if err != nil && !isOnlyErrorNoSuchNetwork(err) { + return nil, err + } + // parse + networks := []networkInspectEntry{} + if err := json.Unmarshal(inspectOut, &networks); err != nil { + return nil, errors.Wrap(err, "failed to decode networks list") + } + return networks, nil +} + +type networkInspectEntry struct { + ID string `json:"Id"` + // NOTE: we don't care about the contents here but we need to parse + // how many entries exist in the containers map + Containers map[string]map[string]string `json:"Containers"` +} + +// networksWithName returns a list of network IDs for networks with this name +func networksWithName(name string) ([]string, error) { + lsOut, err := exec.Output(exec.Command( + "docker", "network", "ls", + "--filter=name=^"+regexp.QuoteMeta(name)+"$", + "--format={{.ID}}", // output as unambiguous IDs + )) + if err != nil { + return nil, err + } + cleaned := strings.TrimSuffix(string(lsOut), "\n") + if cleaned == "" { // avoid returning []string{""} + return nil, nil + } + return strings.Split(cleaned, "\n"), nil +} + +func checkIfNetworkExists(name string) (bool, error) { + out, err := exec.Output(exec.Command( + "docker", "network", "ls", + "--filter=name=^"+regexp.QuoteMeta(name)+"$", + "--format={{.Name}}", + )) + return strings.HasPrefix(string(out), name), err +} + +func isIPv6UnavailableError(err error) bool { + rerr := exec.RunErrorForError(err) + return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: Cannot read IPv6 setup for bridge") +} + +func isPoolOverlapError(err error) bool { + rerr := exec.RunErrorForError(err) + return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: Pool overlaps with other one on this address space") || strings.Contains(string(rerr.Output), "networks have overlapping") +} + +func isNetworkAlreadyExistsError(err error) bool { + rerr := exec.RunErrorForError(err) + return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: network with name") && strings.Contains(string(rerr.Output), "already exists") +} + +// returns true if: +// - err only contains no such network errors +func isOnlyErrorNoSuchNetwork(err error) bool { + rerr := exec.RunErrorForError(err) + if rerr == nil { + return false + } + // check all lines of output from errored command + b := bytes.NewBuffer(rerr.Output) + for { + l, err := b.ReadBytes('\n') + if err == io.EOF { + break + } else if err != nil { + return false + } + // if the line begins with Error: No such network: it's fine + s := string(l) + if strings.HasPrefix(s, "Error: No such network:") { + continue + } + // other errors are not fine + if strings.HasPrefix(s, "Error: ") { + return false + 
		}
+		// other line contents should just be network references
+	}
+	return true
+}
+
+func deleteNetworks(networks ...string) error {
+	return exec.Command("docker", append([]string{"network", "rm"}, networks...)...).Run()
+}
+
+// generateULASubnetFromName generates an IPv6 subnet based on the
+// name and Nth probing attempt
+func generateULASubnetFromName(name string, attempt int32) string {
+	ip := make([]byte, 16)
+	ip[0] = 0xfc
+	ip[1] = 0x00
+	h := sha1.New()
+	_, _ = h.Write([]byte(name))
+	_ = binary.Write(h, binary.LittleEndian, attempt)
+	bs := h.Sum(nil)
+	for i := 2; i < 8; i++ {
+		ip[i] = bs[i]
+	}
+	subnet := &net.IPNet{
+		IP:   net.IP(ip),
+		Mask: net.CIDRMask(64, 128),
+	}
+	return subnet.String()
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/node.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/node.go
new file mode 100644
index 000000000..9bad7ed01
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/node.go
@@ -0,0 +1,171 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package docker
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"strings"
+
+	"sigs.k8s.io/kind/pkg/errors"
+	"sigs.k8s.io/kind/pkg/exec"
+)
+
+// nodes.Node implementation for the docker provider
+type node struct {
+	name string
+}
+
+func (n *node) String() string {
+	return n.name
+}
+
+func (n *node) Role() (string, error) {
+	cmd := exec.Command("docker", "inspect",
+		"--format", fmt.Sprintf(`{{ index .Config.Labels "%s"}}`, nodeRoleLabelKey),
+		n.name,
+	)
+	lines, err := exec.OutputLines(cmd)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to get role for node")
+	}
+	if len(lines) != 1 {
+		return "", errors.Errorf("failed to get role for node: output lines %d != 1", len(lines))
+	}
+	return lines[0], nil
+}
+
+func (n *node) IP() (ipv4 string, ipv6 string, err error) {
+	// retrieve the IP address of the node using docker inspect
+	cmd := exec.Command("docker", "inspect",
+		"-f", "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}",
+		n.name, // ... against the "node" container
+	)
+	lines, err := exec.OutputLines(cmd)
+	if err != nil {
+		return "", "", errors.Wrap(err, "failed to get container details")
+	}
+	if len(lines) != 1 {
+		return "", "", errors.Errorf("file should only be one line, got %d lines", len(lines))
+	}
+	ips := strings.Split(lines[0], ",")
+	if len(ips) != 2 {
+		return "", "", errors.Errorf("container addresses should have 2 values, got %d values", len(ips))
+	}
+	return ips[0], ips[1], nil
+}
+
+func (n *node) Command(command string, args ...string) exec.Cmd {
+	return &nodeCmd{
+		nameOrID: n.name,
+		command:  command,
+		args:     args,
+	}
+}
+
+func (n *node) CommandContext(ctx context.Context, command string, args ...string) exec.Cmd {
+	return &nodeCmd{
+		nameOrID: n.name,
+		command:  command,
+		args:     args,
+		ctx:      ctx,
+	}
+}
+
+// nodeCmd implements exec.Cmd for docker nodes
+type nodeCmd struct {
+	nameOrID string // the container name or ID
+	command  string
+	args     []string
+	env      []string
+	stdin    io.Reader
+	stdout   io.Writer
+	stderr   io.Writer
+	ctx      context.Context
+}
+
+func (c *nodeCmd) Run() error {
+	args := []string{
+		"exec",
+		// run with privileges so we can remount etc..
+		// this might not make sense in the most general sense, but it is
+		// important to many kind commands
+		"--privileged",
+	}
+	if c.stdin != nil {
+		args = append(args,
+			"-i", // interactive so we can supply input
+		)
+	}
+	// set env
+	for _, env := range c.env {
+		args = append(args, "-e", env)
+	}
+	// specify the container and command, after this everything will be
+	// args to the command in the container rather than to docker
+	args = append(
+		args,
+		c.nameOrID, // ... against the container
+		c.command,  // with the command specified
+	)
+	args = append(
+		args,
+		// finally, with the caller args
+		c.args...,
+	)
+	var cmd exec.Cmd
+	if c.ctx != nil {
+		cmd = exec.CommandContext(c.ctx, "docker", args...)
+	} else {
+		cmd = exec.Command("docker", args...)
+	}
+	if c.stdin != nil {
+		cmd.SetStdin(c.stdin)
+	}
+	if c.stderr != nil {
+		cmd.SetStderr(c.stderr)
+	}
+	if c.stdout != nil {
+		cmd.SetStdout(c.stdout)
+	}
+	return cmd.Run()
+}
+
+func (c *nodeCmd) SetEnv(env ...string) exec.Cmd {
+	c.env = env
+	return c
+}
+
+func (c *nodeCmd) SetStdin(r io.Reader) exec.Cmd {
+	c.stdin = r
+	return c
+}
+
+func (c *nodeCmd) SetStdout(w io.Writer) exec.Cmd {
+	c.stdout = w
+	return c
+}
+
+func (c *nodeCmd) SetStderr(w io.Writer) exec.Cmd {
+	c.stderr = w
+	return c
+}
+
+func (n *node) SerialLogs(w io.Writer) error {
+	return exec.Command("docker", "logs", n.name).SetStdout(w).SetStderr(w).Run()
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provider.go
new file mode 100644
index 000000000..b1786fbc9
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provider.go
@@ -0,0 +1,344 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package docker + +import ( + "encoding/csv" + "encoding/json" + "fmt" + "net" + "os" + "path/filepath" + "strings" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/log" + + internallogs "sigs.k8s.io/kind/pkg/cluster/internal/logs" + "sigs.k8s.io/kind/pkg/cluster/internal/providers" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" + "sigs.k8s.io/kind/pkg/internal/sets" +) + +// NewProvider returns a new provider based on executing `docker ...` +func NewProvider(logger log.Logger) providers.Provider { + return &provider{ + logger: logger, + } +} + +// Provider implements provider.Provider +// see NewProvider +type provider struct { + logger log.Logger + info *providers.ProviderInfo +} + +// String implements fmt.Stringer +// NOTE: the value of this should not currently be relied upon for anything! +// This is only used for setting the Node's providerID +func (p *provider) String() string { + return "docker" +} + +// Provision is part of the providers.Provider interface +func (p *provider) Provision(status *cli.Status, cfg *config.Cluster) (err error) { + // TODO: validate cfg + // ensure node images are pulled before actually provisioning + if err := ensureNodeImages(p.logger, status, cfg); err != nil { + return err + } + + // ensure the pre-requisite network exists + networkName := fixedNetworkName + if n := os.Getenv("KIND_EXPERIMENTAL_DOCKER_NETWORK"); n != "" { + p.logger.Warn("WARNING: Overriding docker network due to KIND_EXPERIMENTAL_DOCKER_NETWORK") + p.logger.Warn("WARNING: Here be dragons! This is not supported currently.") + networkName = n + } + if err := ensureNetwork(networkName); err != nil { + return errors.Wrap(err, "failed to ensure docker network") + } + + // actually provision the cluster + icons := strings.Repeat("📦 ", len(cfg.Nodes)) + status.Start(fmt.Sprintf("Preparing nodes %s", icons)) + defer func() { status.End(err == nil) }() + + // plan creating the containers + createContainerFuncs, err := planCreation(cfg, networkName) + if err != nil { + return err + } + + // actually create nodes + return errors.UntilErrorConcurrent(createContainerFuncs) +} + +// ListClusters is part of the providers.Provider interface +func (p *provider) ListClusters() ([]string, error) { + cmd := exec.Command("docker", + "ps", + "-a", // show stopped nodes + // filter for nodes with the cluster label + "--filter", "label="+clusterLabelKey, + // format to include the cluster name + "--format", fmt.Sprintf(`{{.Label "%s"}}`, clusterLabelKey), + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to list clusters") + } + return sets.NewString(lines...).List(), nil +} + +// ListNodes is part of the providers.Provider interface +func (p *provider) ListNodes(cluster string) ([]nodes.Node, error) { + cmd := exec.Command("docker", + "ps", + "-a", // show stopped nodes + // filter for nodes with the cluster label + "--filter", fmt.Sprintf("label=%s=%s", clusterLabelKey, cluster), + // format to include the cluster name + "--format", `{{.Names}}`, + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to list nodes") + } + // convert names to node handles + ret := make([]nodes.Node, 0, len(lines)) + for _, name := range lines { + ret = append(ret, p.node(name)) + } + return ret, nil +} + +// 
DeleteNodes is part of the providers.Provider interface
+func (p *provider) DeleteNodes(n []nodes.Node) error {
+ if len(n) == 0 {
+ return nil
+ }
+ const command = "docker"
+ args := make([]string, 0, len(n)+3) // allocate once
+ args = append(args,
+ "rm",
+ "-f", // force the container to be deleted now
+ "-v", // delete volumes
+ )
+ for _, node := range n {
+ args = append(args, node.String())
+ }
+ if err := exec.Command(command, args...).Run(); err != nil {
+ return errors.Wrap(err, "failed to delete nodes")
+ }
+ return nil
+}
+
+// GetAPIServerEndpoint is part of the providers.Provider interface
+func (p *provider) GetAPIServerEndpoint(cluster string) (string, error) {
+ // locate the node that hosts this
+ allNodes, err := p.ListNodes(cluster)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to list nodes")
+ }
+ n, err := nodeutils.APIServerEndpointNode(allNodes)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to get api server endpoint")
+ }
+
+ // if the 'desktop.docker.io/ports/<port>/tcp' label is present,
+ // defer to its value for the api server endpoint
+ //
+ // For example:
+ // "Labels": {
+ // "desktop.docker.io/ports/6443/tcp": "10.0.1.7:6443",
+ // }
+ cmd := exec.Command(
+ "docker", "inspect",
+ "--format", fmt.Sprintf(
+ "{{ index .Config.Labels \"desktop.docker.io/ports/%d/tcp\" }}", common.APIServerInternalPort,
+ ),
+ n.String(),
+ )
+ lines, err := exec.OutputLines(cmd)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to get api server port")
+ }
+ if len(lines) == 1 && lines[0] != "" {
+ return lines[0], nil
+ }
+
+ // else, retrieve the specific port mapping via NetworkSettings.Ports
+ cmd = exec.Command(
+ "docker", "inspect",
+ "--format", fmt.Sprintf(
+ "{{ with (index (index .NetworkSettings.Ports \"%d/tcp\") 0) }}{{ printf \"%%s\t%%s\" .HostIp .HostPort }}{{ end }}", common.APIServerInternalPort,
+ ),
+ n.String(),
+ )
+ lines, err = exec.OutputLines(cmd)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to get api server port")
+ }
+ if len(lines) != 1 {
+ return "", errors.Errorf("network details should only be one line, got %d lines", len(lines))
+ }
+ parts := strings.Split(lines[0], "\t")
+ if len(parts) != 2 {
+ return "", errors.Errorf("network details should only be two parts, got %d", len(parts))
+ }
+
+ // join host and port
+ return net.JoinHostPort(parts[0], parts[1]), nil
+}
+
+// GetAPIServerInternalEndpoint is part of the providers.Provider interface
+func (p *provider) GetAPIServerInternalEndpoint(cluster string) (string, error) {
+ // locate the node that hosts this
+ allNodes, err := p.ListNodes(cluster)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to list nodes")
+ }
+ n, err := nodeutils.APIServerEndpointNode(allNodes)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to get api server endpoint")
+ }
+ // NOTE: we're using the nodes's hostnames which are their names
+ return net.JoinHostPort(n.String(), fmt.Sprintf("%d", common.APIServerInternalPort)), nil
+}
+
+// node returns a new node handle for this provider
+func (p *provider) node(name string) nodes.Node {
+ return &node{
+ name: name,
+ }
+}
+
+// CollectLogs will populate dir with cluster logs and other debug files
+func (p *provider) CollectLogs(dir string, nodes []nodes.Node) error {
+ execToPathFn := func(cmd exec.Cmd, path string) func() error {
+ return func() error {
+ f, err := common.FileOnHost(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return cmd.SetStdout(f).SetStderr(f).Run()
+ }
+ }
+ // construct a slice
of methods to collect logs + fns := []func() error{ + // record info about the host docker + execToPathFn( + exec.Command("docker", "info"), + filepath.Join(dir, "docker-info.txt"), + ), + } + + // collect /var/log for each node and plan collecting more logs + var errs []error + for _, n := range nodes { + node := n // https://golang.org/doc/faq#closures_and_goroutines + name := node.String() + path := filepath.Join(dir, name) + if err := internallogs.DumpDir(p.logger, node, "/var/log", path); err != nil { + errs = append(errs, err) + } + + fns = append(fns, + func() error { return common.CollectLogs(node, path) }, + execToPathFn(exec.Command("docker", "inspect", name), filepath.Join(path, "inspect.json")), + func() error { + f, err := common.FileOnHost(filepath.Join(path, "serial.log")) + if err != nil { + return err + } + defer f.Close() + return node.SerialLogs(f) + }, + ) + } + + // run and collect up all errors + errs = append(errs, errors.AggregateConcurrent(fns)) + return errors.NewAggregate(errs) +} + +// Info returns the provider info. +// The info is cached on the first time of the execution. +func (p *provider) Info() (*providers.ProviderInfo, error) { + var err error + if p.info == nil { + p.info, err = info() + } + return p.info, err +} + +// dockerInfo corresponds to `docker info --format '{{json .}}'` +type dockerInfo struct { + CgroupDriver string `json:"CgroupDriver"` // "systemd", "cgroupfs", "none" + CgroupVersion string `json:"CgroupVersion"` // e.g. "2" + MemoryLimit bool `json:"MemoryLimit"` + PidsLimit bool `json:"PidsLimit"` + CPUShares bool `json:"CPUShares"` + SecurityOptions []string `json:"SecurityOptions"` +} + +func info() (*providers.ProviderInfo, error) { + cmd := exec.Command("docker", "info", "--format", "{{json .}}") + out, err := exec.Output(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to get docker info") + } + var dInfo dockerInfo + if err := json.Unmarshal(out, &dInfo); err != nil { + return nil, err + } + info := providers.ProviderInfo{ + Cgroup2: dInfo.CgroupVersion == "2", + } + // When CgroupDriver == "none", the MemoryLimit/PidsLimit/CPUShares + // values are meaningless and need to be considered false. + // https://github.com/moby/moby/issues/42151 + if dInfo.CgroupDriver != "none" { + info.SupportsMemoryLimit = dInfo.MemoryLimit + info.SupportsPidsLimit = dInfo.PidsLimit + info.SupportsCPUShares = dInfo.CPUShares + } + for _, o := range dInfo.SecurityOptions { + // o is like "name=seccomp,profile=default", or "name=rootless", + csvReader := csv.NewReader(strings.NewReader(o)) + sliceSlice, err := csvReader.ReadAll() + if err != nil { + return nil, err + } + for _, f := range sliceSlice { + for _, ff := range f { + if ff == "name=rootless" { + info.Rootless = true + } + } + } + } + return &info, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go new file mode 100644 index 000000000..6c644a365 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go @@ -0,0 +1,418 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package docker + +import ( + "context" + "fmt" + "net" + "path/filepath" + "strings" + "time" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/fs" + + "sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// planCreation creates a slice of funcs that will create the containers +func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs []func() error, err error) { + // we need to know all the names for NO_PROXY + // compute the names first before any actual node details + nodeNamer := common.MakeNodeNamer(cfg.Name) + names := make([]string, len(cfg.Nodes)) + for i, node := range cfg.Nodes { + name := nodeNamer(string(node.Role)) // name the node + names[i] = name + } + haveLoadbalancer := config.ClusterHasImplicitLoadBalancer(cfg) + if haveLoadbalancer { + names = append(names, nodeNamer(constants.ExternalLoadBalancerNodeRoleValue)) + } + + // these apply to all container creation + genericArgs, err := commonArgs(cfg.Name, cfg, networkName, names) + if err != nil { + return nil, err + } + + // only the external LB should reflect the port if we have multiple control planes + apiServerPort := cfg.Networking.APIServerPort + apiServerAddress := cfg.Networking.APIServerAddress + if haveLoadbalancer { + // TODO: picking ports locally is less than ideal with remote docker + // but this is supposed to be an implementation detail and NOT picking + // them breaks host reboot ... 
+ // For now remote docker + multi control plane is not supported + apiServerPort = 0 // replaced with random ports + apiServerAddress = "127.0.0.1" // only the LB needs to be non-local + // only for IPv6 only clusters + if cfg.Networking.IPFamily == config.IPv6Family { + apiServerAddress = "::1" // only the LB needs to be non-local + } + // plan loadbalancer node + name := names[len(names)-1] + createContainerFuncs = append(createContainerFuncs, func() error { + args, err := runArgsForLoadBalancer(cfg, name, genericArgs) + if err != nil { + return err + } + return createContainer(name, args) + }) + } + + // plan normal nodes + for i, node := range cfg.Nodes { + node := node.DeepCopy() // copy so we can modify + name := names[i] + + // fixup relative paths, docker can only handle absolute paths + for m := range node.ExtraMounts { + hostPath := node.ExtraMounts[m].HostPath + if !fs.IsAbs(hostPath) { + absHostPath, err := filepath.Abs(hostPath) + if err != nil { + return nil, errors.Wrapf(err, "unable to resolve absolute path for hostPath: %q", hostPath) + } + node.ExtraMounts[m].HostPath = absHostPath + } + } + + // plan actual creation based on role + switch node.Role { + case config.ControlPlaneRole: + createContainerFuncs = append(createContainerFuncs, func() error { + node.ExtraPortMappings = append(node.ExtraPortMappings, + config.PortMapping{ + ListenAddress: apiServerAddress, + HostPort: apiServerPort, + ContainerPort: common.APIServerInternalPort, + }, + ) + args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs) + if err != nil { + return err + } + return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args) + }) + case config.WorkerRole: + createContainerFuncs = append(createContainerFuncs, func() error { + args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs) + if err != nil { + return err + } + return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args) + }) + default: + return nil, errors.Errorf("unknown node role: %q", node.Role) + } + } + return createContainerFuncs, nil +} + +// commonArgs computes static arguments that apply to all containers +func commonArgs(cluster string, cfg *config.Cluster, networkName string, nodeNames []string) ([]string, error) { + // standard arguments all nodes containers need, computed once + args := []string{ + "--detach", // run the container detached + "--tty", // allocate a tty for entrypoint logs + // label the node with the cluster ID + "--label", fmt.Sprintf("%s=%s", clusterLabelKey, cluster), + // user a user defined docker network so we get embedded DNS + "--net", networkName, + // Docker supports the following restart modes: + // - no + // - on-failure[:max-retries] + // - unless-stopped + // - always + // https://docs.docker.com/engine/reference/commandline/run/#restart-policies---restart + // + // What we desire is: + // - restart on host / dockerd reboot + // - don't restart for any other reason + // + // This means: + // - no is out of the question ... it never restarts + // - always is a poor choice, we'll keep trying to restart nodes that were + // never going to work + // - unless-stopped will also retry failures indefinitely, similar to always + // except that it won't restart when the container is `docker stop`ed + // - on-failure is not great, we're only interested in restarting on + // reboots, not failures. *however* we can limit the number of retries + // *and* it forgets all state on dockerd restart and retries anyhow. 
+ // - on-failure:0 is what we want .. restart on failures, except max + // retries is 0, so only restart on reboots. + // however this _actually_ means the same thing as always + // so the closest thing is on-failure:1, which will retry *once* + "--restart=on-failure:1", + // this can be enabled by default in docker daemon.json, so we explicitly + // disable it, we want our entrypoint to be PID1, not docker-init / tini + "--init=false", + // note: requires API v1.41+ from Dec 2020 in Docker 20.10.0 + // this is the default with cgroups v2 but not with cgroups v1, unless + // overridden in the daemon --default-cgroupns-mode + // https://github.com/docker/cli/pull/3699#issuecomment-1191675788 + "--cgroupns=private", + } + + // enable IPv6 if necessary + if config.ClusterHasIPv6(cfg) { + args = append(args, "--sysctl=net.ipv6.conf.all.disable_ipv6=0", "--sysctl=net.ipv6.conf.all.forwarding=1") + } + + // pass proxy environment variables + proxyEnv, err := getProxyEnv(cfg, networkName, nodeNames) + if err != nil { + return nil, errors.Wrap(err, "proxy setup error") + } + for key, val := range proxyEnv { + args = append(args, "-e", fmt.Sprintf("%s=%s", key, val)) + } + + // handle hosts that have user namespace remapping enabled + if usernsRemap() { + args = append(args, "--userns=host") + } + + // handle Docker on Btrfs or ZFS + // https://github.com/kubernetes-sigs/kind/issues/1416#issuecomment-606514724 + if mountDevMapper() { + args = append(args, "--volume", "/dev/mapper:/dev/mapper") + } + + // enable /dev/fuse explicitly for fuse-overlayfs + // (Rootless Docker does not automatically mount /dev/fuse with --privileged) + if mountFuse() { + args = append(args, "--device", "/dev/fuse") + } + + if cfg.Networking.DNSSearch != nil { + args = append(args, "-e", "KIND_DNS_SEARCH="+strings.Join(*cfg.Networking.DNSSearch, " ")) + } + + return args, nil +} + +func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, name string, args []string) ([]string, error) { + args = append([]string{ + "--hostname", name, // make hostname match container name + // label the node with the role ID + "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, node.Role), + // running containers in a container requires privileged + // NOTE: we could try to replicate this with --cap-add, and use less + // privileges, but this flag also changes some mounts that are necessary + // including some ones docker would otherwise do by default. + // for now this is what we want. in the future we may revisit this. + "--privileged", + "--security-opt", "seccomp=unconfined", // also ignore seccomp + "--security-opt", "apparmor=unconfined", // also ignore apparmor + // runtime temporary storage + "--tmpfs", "/tmp", // various things depend on working /tmp + "--tmpfs", "/run", // systemd wants a writable /run + // runtime persistent storage + // this ensures that E.G. pods, logs etc. are not on the container + // filesystem, which is not only better for performance, but allows + // running kind in kind for "party tricks" + // (please don't depend on doing this though!) + "--volume", "/var", + // some k8s things want to read /lib/modules + "--volume", "/lib/modules:/lib/modules:ro", + // propagate KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER to the entrypoint script + "-e", "KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER", + }, + args..., + ) + + // convert mounts and port mappings to container run args + args = append(args, generateMountBindings(node.ExtraMounts...)...) 
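+ // Editor's note, purely illustrative and not part of upstream kind: the
+ // bindings appended above are plain `docker run` volume flags. For a
+ // hypothetical mount such as
+ //
+ //	generateMountBindings(config.Mount{
+ //		HostPath:       "/tmp/audit",
+ //		ContainerPath:  "/audit",
+ //		Readonly:       true,
+ //		SelinuxRelabel: true,
+ //	})
+ //
+ // the result would be []string{"--volume=/tmp/audit:/audit:ro,Z"}; see
+ // generateMountBindings later in this file for the exact rules.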
+ mappingArgs, err := generatePortMappings(clusterIPFamily, node.ExtraPortMappings...)
+ if err != nil {
+ return nil, err
+ }
+ args = append(args, mappingArgs...)
+
+ switch node.Role {
+ case config.ControlPlaneRole:
+ args = append(args, "-e", "KUBECONFIG=/etc/kubernetes/admin.conf")
+ }
+
+ // finally, specify the image to run
+ return append(args, node.Image), nil
+}
+
+func runArgsForLoadBalancer(cfg *config.Cluster, name string, args []string) ([]string, error) {
+ args = append([]string{
+ "--hostname", name, // make hostname match container name
+ // label the node with the role ID
+ "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, constants.ExternalLoadBalancerNodeRoleValue),
+ },
+ args...,
+ )
+
+ // load balancer port mapping
+ mappingArgs, err := generatePortMappings(cfg.Networking.IPFamily,
+ config.PortMapping{
+ ListenAddress: cfg.Networking.APIServerAddress,
+ HostPort: cfg.Networking.APIServerPort,
+ ContainerPort: common.APIServerInternalPort,
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+ args = append(args, mappingArgs...)
+
+ // finally, specify the image to run
+ return append(args, loadbalancer.Image), nil
+}
+
+func getProxyEnv(cfg *config.Cluster, networkName string, nodeNames []string) (map[string]string, error) {
+ envs := common.GetProxyEnvs(cfg)
+ // Specifically add the docker network subnets to NO_PROXY if we are using a proxy
+ if len(envs) > 0 {
+ subnets, err := getSubnets(networkName)
+ if err != nil {
+ return nil, err
+ }
+
+ noProxyList := append(subnets, envs[common.NOProxy])
+ noProxyList = append(noProxyList, nodeNames...)
+ // Add pod and service dns names to no_proxy to allow in cluster
+ // Note: this is best effort based on the default CoreDNS spec
+ // https://github.com/kubernetes/dns/blob/master/docs/specification.md
+ // Any user created pod/service hostnames, namespaces, custom DNS services
+ // are expected to be no-proxied by the user explicitly.
+ noProxyList = append(noProxyList, ".svc", ".svc.cluster", ".svc.cluster.local")
+ noProxyJoined := strings.Join(noProxyList, ",")
+ envs[common.NOProxy] = noProxyJoined
+ envs[strings.ToLower(common.NOProxy)] = noProxyJoined
+ }
+ return envs, nil
+}
+
+func getSubnets(networkName string) ([]string, error) {
+ format := `{{range (index (index . "IPAM") "Config")}}{{index . "Subnet"}} {{end}}`
+ cmd := exec.Command("docker", "network", "inspect", "-f", format, networkName)
+ lines, err := exec.OutputLines(cmd)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to get subnets")
+ }
+ return strings.Split(strings.TrimSpace(lines[0]), " "), nil
+}
+
+// generateMountBindings converts the mount list to a list of args for docker
+// '<HostPath>:<ContainerPath>[:options]', where 'options'
+// is a comma-separated list of the following strings:
+// 'ro', if the path is read only
+// 'Z', if the volume requires SELinux relabeling
+func generateMountBindings(mounts ...config.Mount) []string {
+ args := make([]string, 0, len(mounts))
+ for _, m := range mounts {
+ bind := fmt.Sprintf("%s:%s", m.HostPath, m.ContainerPath)
+ var attrs []string
+ if m.Readonly {
+ attrs = append(attrs, "ro")
+ }
+ // Only request relabeling if the pod provides an SELinux context. If the pod
+ // does not provide an SELinux context relabeling will label the volume with
+ // the container's randomly allocated MCS label. This would restrict access
+ // to the volume to the container which mounts it first.
+ if m.SelinuxRelabel { + attrs = append(attrs, "Z") + } + switch m.Propagation { + case config.MountPropagationNone: + // noop, private is default + case config.MountPropagationBidirectional: + attrs = append(attrs, "rshared") + case config.MountPropagationHostToContainer: + attrs = append(attrs, "rslave") + default: // Falls back to "private" + } + if len(attrs) > 0 { + bind = fmt.Sprintf("%s:%s", bind, strings.Join(attrs, ",")) + } + args = append(args, fmt.Sprintf("--volume=%s", bind)) + } + return args +} + +// generatePortMappings converts the portMappings list to a list of args for docker +func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings ...config.PortMapping) ([]string, error) { + args := make([]string, 0, len(portMappings)) + for _, pm := range portMappings { + // do provider internal defaulting + // in a future API revision we will handle this at the API level and remove this + if pm.ListenAddress == "" { + switch clusterIPFamily { + case config.IPv4Family, config.DualStackFamily: + pm.ListenAddress = "0.0.0.0" // this is the docker default anyhow + case config.IPv6Family: + pm.ListenAddress = "::" + default: + return nil, errors.Errorf("unknown cluster IP family: %v", clusterIPFamily) + } + } + if string(pm.Protocol) == "" { + pm.Protocol = config.PortMappingProtocolTCP // TCP is the default + } + + // validate that the provider can handle this binding + switch pm.Protocol { + case config.PortMappingProtocolTCP: + case config.PortMappingProtocolUDP: + case config.PortMappingProtocolSCTP: + default: + return nil, errors.Errorf("unknown port mapping protocol: %v", pm.Protocol) + } + + // get a random port if necessary (port = 0) + hostPort, err := common.PortOrGetFreePort(pm.HostPort, pm.ListenAddress) + if err != nil { + return nil, errors.Wrap(err, "failed to get random host port for port mapping") + } + + // generate the actual mapping arg + protocol := string(pm.Protocol) + hostPortBinding := net.JoinHostPort(pm.ListenAddress, fmt.Sprintf("%d", hostPort)) + args = append(args, fmt.Sprintf("--publish=%s:%d/%s", hostPortBinding, pm.ContainerPort, protocol)) + } + return args, nil +} + +func createContainer(name string, args []string) error { + if err := exec.Command("docker", append([]string{"run", "--name", name}, args...)...).Run(); err != nil { + return err + } + return nil +} + +func createContainerWithWaitUntilSystemdReachesMultiUserSystem(name string, args []string) error { + if err := exec.Command("docker", append([]string{"run", "--name", name}, args...)...).Run(); err != nil { + return err + } + + logCtx, logCancel := context.WithTimeout(context.Background(), 30*time.Second) + logCmd := exec.CommandContext(logCtx, "docker", "logs", "-f", name) + defer logCancel() + return common.WaitUntilLogRegexpMatches(logCtx, logCmd, common.NodeReachedCgroupsReadyRegexp()) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/util.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/util.go new file mode 100644 index 000000000..2ec86d73f --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/util.go @@ -0,0 +1,100 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package docker + +import ( + "encoding/json" + "strings" + + "sigs.k8s.io/kind/pkg/exec" +) + +// IsAvailable checks if docker is available in the system +func IsAvailable() bool { + cmd := exec.Command("docker", "-v") + lines, err := exec.OutputLines(cmd) + if err != nil || len(lines) != 1 { + return false + } + return strings.HasPrefix(lines[0], "Docker version") +} + +// usernsRemap checks if userns-remap is enabled in dockerd +func usernsRemap() bool { + cmd := exec.Command("docker", "info", "--format", "'{{json .SecurityOptions}}'") + lines, err := exec.OutputLines(cmd) + if err != nil { + return false + } + if len(lines) > 0 { + if strings.Contains(lines[0], "name=userns") { + return true + } + } + return false +} + +// mountDevMapper checks if the Docker storage driver is Btrfs or ZFS +// or if the backing filesystem is Btrfs +func mountDevMapper() bool { + storage := "" + // check the docker storage driver + cmd := exec.Command("docker", "info", "-f", "{{.Driver}}") + lines, err := exec.OutputLines(cmd) + if err != nil || len(lines) != 1 { + return false + } + + storage = strings.ToLower(strings.TrimSpace(lines[0])) + if storage == "btrfs" || storage == "zfs" || storage == "devicemapper" { + return true + } + + // check the backing file system + // docker info -f '{{json .DriverStatus }}' + // [["Backing Filesystem","extfs"],["Supports d_type","true"],["Native Overlay Diff","true"]] + cmd = exec.Command("docker", "info", "-f", "{{json .DriverStatus }}") + lines, err = exec.OutputLines(cmd) + if err != nil || len(lines) != 1 { + return false + } + var dat [][]string + if err := json.Unmarshal([]byte(lines[0]), &dat); err != nil { + return false + } + for _, item := range dat { + if item[0] == "Backing Filesystem" { + storage = strings.ToLower(item[1]) + break + } + } + + return storage == "btrfs" || storage == "zfs" || storage == "xfs" +} + +// rootless: use fuse-overlayfs by default +// https://github.com/kubernetes-sigs/kind/issues/2275 +func mountFuse() bool { + i, err := info() + if err != nil { + return false + } + if i != nil && i.Rootless { + return true + } + return false +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/OWNERS b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/OWNERS new file mode 100644 index 000000000..167d41b76 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/OWNERS @@ -0,0 +1,13 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - aojea + - BenTheElder +approvers: + - aojea + - BenTheElder +emeritus_approvers: + - amwat + +labels: + - area/provider/podman diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/constants.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/constants.go new file mode 100644 index 000000000..e30f167d2 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/constants.go @@ -0,0 +1,24 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package podman
+
+// clusterLabelKey is applied to each "node" podman container for identification
+const clusterLabelKey = "io.x-k8s.kind.cluster"
+
+// nodeRoleLabelKey is applied to each "node" podman container for categorization
+// of nodes by role
+const nodeRoleLabelKey = "io.x-k8s.kind.role"
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/images.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/images.go
new file mode 100644
index 000000000..c44fead50
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/images.go
@@ -0,0 +1,115 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package podman
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "sigs.k8s.io/kind/pkg/errors"
+ "sigs.k8s.io/kind/pkg/exec"
+ "sigs.k8s.io/kind/pkg/log"
+
+ "sigs.k8s.io/kind/pkg/cluster/internal/providers/common"
+ "sigs.k8s.io/kind/pkg/internal/apis/config"
+ "sigs.k8s.io/kind/pkg/internal/cli"
+)
+
+// ensureNodeImages ensures that the node images used by the create
+// configuration are present
+func ensureNodeImages(logger log.Logger, status *cli.Status, cfg *config.Cluster) error {
+ // pull each required image
+ for _, image := range common.RequiredNodeImages(cfg).List() {
+ // prints user friendly message
+ friendlyImageName, image := sanitizeImage(image)
+ status.Start(fmt.Sprintf("Ensuring node image (%s) 🖼", friendlyImageName))
+ if _, err := pullIfNotPresent(logger, image, 4); err != nil {
+ status.End(false)
+ return err
+ }
+ }
+ return nil
+}
+
+// pullIfNotPresent will pull an image if it is not present locally
+// retrying up to retries times
+// it returns true if it attempted to pull, and any errors from pulling
+func pullIfNotPresent(logger log.Logger, image string, retries int) (pulled bool, err error) {
+ // TODO(bentheelder): switch most (all) of the logging here to debug level
+ // once we have configurable log levels
+ // if this did not return an error, then the image exists locally
+ cmd := exec.Command("podman", "inspect", "--type=image", image)
+ if err := cmd.Run(); err == nil {
+ logger.V(1).Infof("Image: %s present locally", image)
+ return false, nil
+ }
+ // otherwise try to pull it
+ return true, pull(logger, image, retries)
+}
+
+// pull pulls an image, retrying up to retries times
+func pull(logger log.Logger, image string, retries int) error {
+ logger.V(1).Infof("Pulling image: %s ...", image)
+ err := exec.Command("podman", "pull", image).Run()
+ // retry pulling up to retries times if necessary
+ if err != nil {
+ for i := 0; i < retries; i++ {
+ time.Sleep(time.Second * 
time.Duration(i+1)) + logger.V(1).Infof("Trying again to pull image: %q ... %v", image, err) + // TODO(bentheelder): add some backoff / sleep? + err = exec.Command("podman", "pull", image).Run() + if err == nil { + break + } + } + } + return errors.Wrapf(err, "failed to pull image %q", image) +} + +// sanitizeImage is a helper to return human readable image name and +// the podman pullable image name from the provided image +func sanitizeImage(image string) (friendlyImageName, pullImageName string) { + const ( + defaultDomain = "docker.io/" + officialRepoName = "library" + ) + + var remainder string + + if strings.Contains(image, "@sha256:") { + splits := strings.Split(image, "@sha256:") + friendlyImageName = splits[0] + remainder = strings.Split(splits[0], ":")[0] + "@sha256:" + splits[1] + } else { + friendlyImageName = image + remainder = image + } + + if !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + + i := strings.IndexRune(friendlyImageName, '/') + if i == -1 || (!strings.ContainsAny(friendlyImageName[:i], ".:") && friendlyImageName[:i] != "localhost") { + pullImageName = defaultDomain + remainder + } else { + pullImageName = remainder + } + + return +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/network.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/network.go new file mode 100644 index 000000000..ef138bcac --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/network.go @@ -0,0 +1,146 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podman + +import ( + "crypto/sha1" + "encoding/binary" + "net" + "regexp" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// This may be overridden by KIND_EXPERIMENTAL_PODMAN_NETWORK env, +// experimentally... +// +// By default currently picking a single network is equivalent to the previous +// behavior *except* that we moved from the default bridge to a user defined +// network because the default bridge is actually special versus any other +// docker network and lacks the embedded DNS +// +// For now this also makes it easier for apps to join the same network, and +// leaves users with complex networking desires to create and manage their own +// networks. +const fixedNetworkName = "kind" + +// ensureNetwork creates a new network +// podman only creates IPv6 networks for versions >= 2.2.0 +func ensureNetwork(name string) error { + // network already exists + if checkIfNetworkExists(name) { + return nil + } + + // generate unique subnet per network based on the name + // obtained from the ULA fc00::/8 range + // Make N attempts with "probing" in case we happen to collide + subnet := generateULASubnetFromName(name, 0) + err := createNetwork(name, subnet) + if err == nil { + // Success! 
+ return nil
+ }
+
+ if isUnknownIPv6FlagError(err) ||
+ isIPv6DisabledError(err) {
+ return createNetwork(name, "")
+ }
+
+ // Only continue if the error is because of the subnet range
+ // is already allocated
+ if !isPoolOverlapError(err) {
+ return err
+ }
+
+ // keep trying for ipv6 subnets
+ const maxAttempts = 5
+ for attempt := int32(1); attempt < maxAttempts; attempt++ {
+ subnet := generateULASubnetFromName(name, attempt)
+ err = createNetwork(name, subnet)
+ if err == nil {
+ // success!
+ return nil
+ } else if !isPoolOverlapError(err) {
+ // unknown error ...
+ return err
+ }
+ }
+ return errors.New("exhausted attempts trying to find a non-overlapping subnet")
+
+}
+
+func createNetwork(name, ipv6Subnet string) error {
+ if ipv6Subnet == "" {
+ return exec.Command("podman", "network", "create", "-d=bridge", name).Run()
+ }
+ return exec.Command("podman", "network", "create", "-d=bridge",
+ "--ipv6", "--subnet", ipv6Subnet, name).Run()
+}
+
+func checkIfNetworkExists(name string) bool {
+ _, err := exec.Output(exec.Command(
+ "podman", "network", "inspect",
+ regexp.QuoteMeta(name),
+ ))
+ return err == nil
+}
+
+func isUnknownIPv6FlagError(err error) bool {
+ rerr := exec.RunErrorForError(err)
+ return rerr != nil &&
+ strings.Contains(string(rerr.Output), "unknown flag: --ipv6")
+}
+
+func isIPv6DisabledError(err error) bool {
+ rerr := exec.RunErrorForError(err)
+ return rerr != nil &&
+ strings.Contains(string(rerr.Output), "is ipv6 enabled in the kernel")
+}
+
+func isPoolOverlapError(err error) bool {
+ rerr := exec.RunErrorForError(err)
+ if rerr == nil {
+ return false
+ }
+ output := string(rerr.Output)
+ return strings.Contains(output, "is already used on the host or by another config") ||
+ strings.Contains(output, "is being used by a network interface") ||
+ strings.Contains(output, "is already being used by a cni configuration")
+}
+
+// generateULASubnetFromName generates an IPv6 subnet based on the
+// name and Nth probing attempt
+func generateULASubnetFromName(name string, attempt int32) string {
+ ip := make([]byte, 16)
+ ip[0] = 0xfc
+ ip[1] = 0x00
+ h := sha1.New()
+ _, _ = h.Write([]byte(name))
+ _ = binary.Write(h, binary.LittleEndian, attempt)
+ bs := h.Sum(nil)
+ for i := 2; i < 8; i++ {
+ ip[i] = bs[i]
+ }
+ subnet := &net.IPNet{
+ IP: net.IP(ip),
+ Mask: net.CIDRMask(64, 128),
+ }
+ return subnet.String()
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/node.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/node.go
new file mode 100644
index 000000000..5285dd224
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/node.go
@@ -0,0 +1,171 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package podman + +import ( + "context" + "fmt" + "io" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// nodes.Node implementation for the podman provider +type node struct { + name string +} + +func (n *node) String() string { + return n.name +} + +func (n *node) Role() (string, error) { + cmd := exec.Command("podman", "inspect", + "--format", fmt.Sprintf(`{{ index .Config.Labels "%s"}}`, nodeRoleLabelKey), + n.name, + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get role for node") + } + if len(lines) != 1 { + return "", errors.Errorf("failed to get role for node: output lines %d != 1", len(lines)) + } + return lines[0], nil +} + +func (n *node) IP() (ipv4 string, ipv6 string, err error) { + // retrieve the IP address of the node using podman inspect + cmd := exec.Command("podman", "inspect", + "-f", "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}", + n.name, // ... against the "node" container + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", "", errors.Wrap(err, "failed to get container details") + } + if len(lines) != 1 { + return "", "", errors.Errorf("file should only be one line, got %d lines", len(lines)) + } + ips := strings.Split(lines[0], ",") + if len(ips) != 2 { + return "", "", errors.Errorf("container addresses should have 2 values, got %d values", len(ips)) + } + return ips[0], ips[1], nil +} + +func (n *node) Command(command string, args ...string) exec.Cmd { + return &nodeCmd{ + nameOrID: n.name, + command: command, + args: args, + } +} + +func (n *node) CommandContext(ctx context.Context, command string, args ...string) exec.Cmd { + return &nodeCmd{ + nameOrID: n.name, + command: command, + args: args, + ctx: ctx, + } +} + +// nodeCmd implements exec.Cmd for podman nodes +type nodeCmd struct { + nameOrID string // the container name or ID + command string + args []string + env []string + stdin io.Reader + stdout io.Writer + stderr io.Writer + ctx context.Context +} + +func (c *nodeCmd) Run() error { + args := []string{ + "exec", + // run with privileges so we can remount etc.. + // this might not make sense in the most general sense, but it is + // important to many kind commands + "--privileged", + } + if c.stdin != nil { + args = append(args, + "-i", // interactive so we can supply input + ) + } + // set env + for _, env := range c.env { + args = append(args, "-e", env) + } + // specify the container and command, after this everything will be + // args the command in the container rather than to podman + args = append( + args, + c.nameOrID, // ... against the container + c.command, // with the command specified + ) + args = append( + args, + // finally, with the caller args + c.args..., + ) + var cmd exec.Cmd + if c.ctx != nil { + cmd = exec.CommandContext(c.ctx, "podman", args...) + } else { + cmd = exec.Command("podman", args...) 
+ }
+ if c.stdin != nil {
+ cmd.SetStdin(c.stdin)
+ }
+ if c.stderr != nil {
+ cmd.SetStderr(c.stderr)
+ }
+ if c.stdout != nil {
+ cmd.SetStdout(c.stdout)
+ }
+ return cmd.Run()
+}
+
+func (c *nodeCmd) SetEnv(env ...string) exec.Cmd {
+ c.env = env
+ return c
+}
+
+func (c *nodeCmd) SetStdin(r io.Reader) exec.Cmd {
+ c.stdin = r
+ return c
+}
+
+func (c *nodeCmd) SetStdout(w io.Writer) exec.Cmd {
+ c.stdout = w
+ return c
+}
+
+func (c *nodeCmd) SetStderr(w io.Writer) exec.Cmd {
+ c.stderr = w
+ return c
+}
+
+func (n *node) SerialLogs(w io.Writer) error {
+ return exec.Command("podman", "logs", n.name).SetStdout(w).SetStderr(w).Run()
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provider.go
new file mode 100644
index 000000000..856b07b04
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provider.go
@@ -0,0 +1,442 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package podman
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "sigs.k8s.io/kind/pkg/cluster/nodes"
+ "sigs.k8s.io/kind/pkg/cluster/nodeutils"
+ "sigs.k8s.io/kind/pkg/errors"
+ "sigs.k8s.io/kind/pkg/exec"
+ "sigs.k8s.io/kind/pkg/log"
+
+ internallogs "sigs.k8s.io/kind/pkg/cluster/internal/logs"
+ "sigs.k8s.io/kind/pkg/cluster/internal/providers"
+ "sigs.k8s.io/kind/pkg/cluster/internal/providers/common"
+ "sigs.k8s.io/kind/pkg/internal/apis/config"
+ "sigs.k8s.io/kind/pkg/internal/cli"
+ "sigs.k8s.io/kind/pkg/internal/sets"
+ "sigs.k8s.io/kind/pkg/internal/version"
+)
+
+// NewProvider returns a new provider based on executing `podman ...`
+func NewProvider(logger log.Logger) providers.Provider {
+ logger.Warn("enabling experimental podman provider")
+ return &provider{
+ logger: logger,
+ }
+}
+
+// Provider implements provider.Provider
+// see NewProvider
+type provider struct {
+ logger log.Logger
+ info *providers.ProviderInfo
+}
+
+// String implements fmt.Stringer
+// NOTE: the value of this should not currently be relied upon for anything!
+// This is only used for setting the Node's providerID
+func (p *provider) String() string {
+ return "podman"
+}
+
+// Provision is part of the providers.Provider interface
+func (p *provider) Provision(status *cli.Status, cfg *config.Cluster) (err error) {
+ if err := ensureMinVersion(); err != nil {
+ return err
+ }
+
+ // TODO: validate cfg
+ // ensure node images are pulled before actually provisioning
+ if err := ensureNodeImages(p.logger, status, cfg); err != nil {
+ return err
+ }
+
+ // ensure the pre-requisite network exists
+ networkName := fixedNetworkName
+ if n := os.Getenv("KIND_EXPERIMENTAL_PODMAN_NETWORK"); n != "" {
+ p.logger.Warn("WARNING: Overriding podman network due to KIND_EXPERIMENTAL_PODMAN_NETWORK")
+ p.logger.Warn("WARNING: Here be dragons! This is not supported currently.")
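+ // Editor's note (illustrative, not upstream code): the override is driven
+ // purely by the environment, e.g.
+ //
+ //	KIND_EXPERIMENTAL_PODMAN_NETWORK=my-net kind create cluster
+ //
+ // where "my-net" is a hypothetical network name; ensureNetwork below will
+ // create the named network if it does not exist yet.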
+ networkName = n
+ }
+ if err := ensureNetwork(networkName); err != nil {
+ return errors.Wrap(err, "failed to ensure podman network")
+ }
+
+ // actually provision the cluster
+ icons := strings.Repeat("📦 ", len(cfg.Nodes))
+ status.Start(fmt.Sprintf("Preparing nodes %s", icons))
+ defer func() { status.End(err == nil) }()
+
+ // plan creating the containers
+ createContainerFuncs, err := planCreation(cfg, networkName)
+ if err != nil {
+ return err
+ }
+
+ // actually create nodes
+ return errors.UntilErrorConcurrent(createContainerFuncs)
+}
+
+// ListClusters is part of the providers.Provider interface
+func (p *provider) ListClusters() ([]string, error) {
+ cmd := exec.Command("podman",
+ "ps",
+ "-a", // show stopped nodes
+ // filter for nodes with the cluster label
+ "--filter", "label="+clusterLabelKey,
+ // format to include the cluster name
+ "--format", fmt.Sprintf(`{{index .Labels "%s"}}`, clusterLabelKey),
+ )
+ lines, err := exec.OutputLines(cmd)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to list clusters")
+ }
+ return sets.NewString(lines...).List(), nil
+}
+
+// ListNodes is part of the providers.Provider interface
+func (p *provider) ListNodes(cluster string) ([]nodes.Node, error) {
+ cmd := exec.Command("podman",
+ "ps",
+ "-a", // show stopped nodes
+ // filter for nodes with the cluster label
+ "--filter", fmt.Sprintf("label=%s=%s", clusterLabelKey, cluster),
+ // format to include the cluster name
+ "--format", `{{.Names}}`,
+ )
+ lines, err := exec.OutputLines(cmd)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to list nodes")
+ }
+ // convert names to node handles
+ ret := make([]nodes.Node, 0, len(lines))
+ for _, name := range lines {
+ ret = append(ret, p.node(name))
+ }
+ return ret, nil
+}
+
+// DeleteNodes is part of the providers.Provider interface
+func (p *provider) DeleteNodes(n []nodes.Node) error {
+ if len(n) == 0 {
+ return nil
+ }
+ const command = "podman"
+ args := make([]string, 0, len(n)+3) // allocate once
+ args = append(args,
+ "rm",
+ "-f", // force the container to be deleted now
+ "-v", // delete volumes
+ )
+ for _, node := range n {
+ args = append(args, node.String())
+ }
+ if err := exec.Command(command, args...).Run(); err != nil {
+ return errors.Wrap(err, "failed to delete nodes")
+ }
+ var nodeVolumes []string
+ for _, node := range n {
+ volumes, err := getVolumes(node.String())
+ if err != nil {
+ return err
+ }
+ nodeVolumes = append(nodeVolumes, volumes...)
+ }
+ if len(nodeVolumes) == 0 {
+ return nil
+ }
+ return deleteVolumes(nodeVolumes)
+}
+
+// GetAPIServerEndpoint is part of the providers.Provider interface
+func (p *provider) GetAPIServerEndpoint(cluster string) (string, error) {
+ // locate the node that hosts this
+ allNodes, err := p.ListNodes(cluster)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to list nodes")
+ }
+ n, err := nodeutils.APIServerEndpointNode(allNodes)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to get api server endpoint")
+ }
+
+ // TODO: get rid of this once podman settles on how to get the port mapping using podman inspect
+ // This is only used to get the Kubeconfig server field
+ v, err := getPodmanVersion()
+ if err != nil {
+ return "", errors.Wrap(err, "failed to check podman version")
+ }
+ // podman inspect was broken between 2.2.0 and 3.0.0
+ // https://github.com/containers/podman/issues/8444
+ if v.AtLeast(version.MustParseSemantic("2.2.0")) &&
+ v.LessThan(version.MustParseSemantic("3.0.0")) {
+ p.logger.Warnf("WARNING: podman version %s not fully supported, please use versions 3.0.0+", v)
+
+ cmd := exec.Command(
+ "podman", "inspect",
+ "--format",
+ "{{range .NetworkSettings.Ports }}{{range .}}{{.HostIP}}/{{.HostPort}}{{end}}{{end}}",
+ n.String(),
+ )
+
+ lines, err := exec.OutputLines(cmd)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to get api server port")
+ }
+ if len(lines) != 1 {
+ return "", errors.Errorf("network details should only be one line, got %d lines", len(lines))
+ }
+ // output is in the format IP/Port
+ parts := strings.Split(strings.TrimSpace(lines[0]), "/")
+ if len(parts) != 2 {
+ return "", errors.Errorf("network details should be in the format IP/Port, received: %s", parts)
+ }
+ host := parts[0]
+ port, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return "", errors.Errorf("network port not an integer: %v", err)
+ }
+
+ return net.JoinHostPort(host, strconv.Itoa(port)), nil
+ }
+
+ cmd := exec.Command(
+ "podman", "inspect",
+ "--format",
+ "{{ json .NetworkSettings.Ports }}",
+ n.String(),
+ )
+ lines, err := exec.OutputLines(cmd)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to get api server port")
+ }
+ if len(lines) != 1 {
+ return "", errors.Errorf("network details should only be one line, got %d lines", len(lines))
+ }
+
+ // portMapping19 maps to the standard CNI portmapping capability used in podman 1.9
+ // see: https://github.com/containernetworking/cni/blob/spec-v0.4.0/CONVENTIONS.md
+ type portMapping19 struct {
+ HostPort int32 `json:"hostPort"`
+ ContainerPort int32 `json:"containerPort"`
+ Protocol string `json:"protocol"`
+ HostIP string `json:"hostIP"`
+ }
+ // portMapping20 maps to the podman 2.0 portmap type
+ // see: https://github.com/containers/podman/blob/05988fc74fc25f2ad2256d6e011dfb7ad0b9a4eb/libpod/define/container_inspect.go#L134-L143
+ type portMapping20 struct {
+ HostPort string `json:"HostPort"`
+ HostIP string `json:"HostIp"`
+ }
+
+ portMappings20 := make(map[string][]portMapping20)
+ if err := json.Unmarshal([]byte(lines[0]), &portMappings20); err == nil {
+ for k, v := range portMappings20 {
+ protocol := "tcp"
+ parts := strings.Split(k, "/")
+ if len(parts) == 2 {
+ protocol = strings.ToLower(parts[1])
+ }
+ containerPort, err := strconv.Atoi(parts[0])
+ if err != nil {
+ return "", err
+ }
+ for _, pm := range v {
+ if containerPort == common.APIServerInternalPort && protocol == "tcp" {
+ return net.JoinHostPort(pm.HostIP, pm.HostPort), nil
+ }
+ }
+ }
+ }
+
+ var portMappings19 
[]portMapping19 + if err := json.Unmarshal([]byte(lines[0]), &portMappings19); err != nil { + return "", errors.Errorf("invalid network details: %v", err) + } + for _, pm := range portMappings19 { + if pm.ContainerPort == common.APIServerInternalPort && pm.Protocol == "tcp" { + return net.JoinHostPort(pm.HostIP, strconv.Itoa(int(pm.HostPort))), nil + } + } + + return "", errors.Errorf("failed to get api server port") +} + +// GetAPIServerInternalEndpoint is part of the providers.Provider interface +func (p *provider) GetAPIServerInternalEndpoint(cluster string) (string, error) { + // locate the node that hosts this + allNodes, err := p.ListNodes(cluster) + if err != nil { + return "", errors.Wrap(err, "failed to list nodes") + } + n, err := nodeutils.APIServerEndpointNode(allNodes) + if err != nil { + return "", errors.Wrap(err, "failed to get apiserver endpoint") + } + // NOTE: we're using the nodes's hostnames which are their names + return net.JoinHostPort(n.String(), fmt.Sprintf("%d", common.APIServerInternalPort)), nil +} + +// node returns a new node handle for this provider +func (p *provider) node(name string) nodes.Node { + return &node{ + name: name, + } +} + +// CollectLogs will populate dir with cluster logs and other debug files +func (p *provider) CollectLogs(dir string, nodes []nodes.Node) error { + execToPathFn := func(cmd exec.Cmd, path string) func() error { + return func() error { + f, err := common.FileOnHost(path) + if err != nil { + return err + } + defer f.Close() + return cmd.SetStdout(f).SetStderr(f).Run() + } + } + // construct a slice of methods to collect logs + fns := []func() error{ + // record info about the host podman + execToPathFn( + exec.Command("podman", "info"), + filepath.Join(dir, "podman-info.txt"), + ), + } + + // collect /var/log for each node and plan collecting more logs + var errs []error + for _, n := range nodes { + node := n // https://golang.org/doc/faq#closures_and_goroutines + name := node.String() + path := filepath.Join(dir, name) + if err := internallogs.DumpDir(p.logger, node, "/var/log", path); err != nil { + errs = append(errs, err) + } + + fns = append(fns, + func() error { return common.CollectLogs(node, path) }, + execToPathFn(exec.Command("podman", "inspect", name), filepath.Join(path, "inspect.json")), + func() error { + f, err := common.FileOnHost(filepath.Join(path, "serial.log")) + if err != nil { + return err + } + return node.SerialLogs(f) + }, + ) + } + + // run and collect up all errors + errs = append(errs, errors.AggregateConcurrent(fns)) + return errors.NewAggregate(errs) +} + +// Info returns the provider info. +// The info is cached on the first time of the execution. +func (p *provider) Info() (*providers.ProviderInfo, error) { + if p.info == nil { + var err error + p.info, err = info(p.logger) + if err != nil { + return p.info, err + } + } + return p.info, nil +} + +// podmanInfo corresponds to `podman info --format 'json`. +// The structure is different from `docker info --format '{{json .}}'`, +// and lacks information about the availability of the cgroup controllers. +type podmanInfo struct { + Host struct { + CgroupVersion string `json:"cgroupVersion,omitempty"` // "v2" + CgroupControllers []string `json:"cgroupControllers,omitempty"` + Security struct { + Rootless bool `json:"rootless,omitempty"` + } `json:"security"` + } `json:"host"` +} + +// info detects ProviderInfo by executing `podman info --format json`. 
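+//
+// For orientation (editor's sketch; abridged, with hypothetical values), the
+// fields consumed from that JSON look roughly like:
+//
+//	{
+//	  "host": {
+//	    "cgroupVersion": "v2",
+//	    "cgroupControllers": ["cpu", "memory", "pids"],
+//	    "security": { "rootless": true }
+//	  }
+//	}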
+func info(logger log.Logger) (*providers.ProviderInfo, error) {
+ const podman = "podman"
+ args := []string{"info", "--format", "json"}
+ cmd := exec.Command(podman, args...)
+ out, err := exec.Output(cmd)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to get podman info (%s %s): %q",
+ podman, strings.Join(args, " "), string(out))
+ }
+ var pInfo podmanInfo
+ if err := json.Unmarshal(out, &pInfo); err != nil {
+ return nil, err
+ }
+ stringSliceContains := func(s []string, str string) bool {
+ for _, v := range s {
+ if v == str {
+ return true
+ }
+ }
+ return false
+ }
+
+ // Since Podman versions before v4.0.0 do not give controller info,
+ // we assume all the cgroup controllers to be available.
+ // For rootless, this assumption is not always correct,
+ // so we print the warning below.
+ cgroupSupportsMemoryLimit := true
+ cgroupSupportsPidsLimit := true
+ cgroupSupportsCPUShares := true
+
+ v, err := getPodmanVersion()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to check podman version")
+ }
+ // Info for controllers must be available after v4.0.0
+ // via https://github.com/containers/podman/pull/10387
+ if v.AtLeast(version.MustParseSemantic("4.0.0")) {
+ cgroupSupportsMemoryLimit = stringSliceContains(pInfo.Host.CgroupControllers, "memory")
+ cgroupSupportsPidsLimit = stringSliceContains(pInfo.Host.CgroupControllers, "pids")
+ cgroupSupportsCPUShares = stringSliceContains(pInfo.Host.CgroupControllers, "cpu")
+ }
+
+ info := &providers.ProviderInfo{
+ Rootless: pInfo.Host.Security.Rootless,
+ Cgroup2: pInfo.Host.CgroupVersion == "v2",
+ SupportsMemoryLimit: cgroupSupportsMemoryLimit,
+ SupportsPidsLimit: cgroupSupportsPidsLimit,
+ SupportsCPUShares: cgroupSupportsCPUShares,
+ }
+ if info.Rootless && !v.AtLeast(version.MustParseSemantic("4.0.0")) {
+ if logger != nil {
+ logger.Warn("Cgroup controller detection is not implemented for Podman. " +
+ "If you see cgroup-related errors, you might need to set systemd property \"Delegate=yes\", see https://kind.sigs.k8s.io/docs/user/rootless/")
+ }
+ }
+ return info, nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go
new file mode 100644
index 000000000..c240a2929
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go
@@ -0,0 +1,436 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package podman + +import ( + "context" + "encoding/json" + "fmt" + "net" + "path/filepath" + "strings" + "time" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + + "sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// planCreation creates a slice of funcs that will create the containers +func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs []func() error, err error) { + // these apply to all container creation + nodeNamer := common.MakeNodeNamer(cfg.Name) + names := make([]string, len(cfg.Nodes)) + for i, node := range cfg.Nodes { + name := nodeNamer(string(node.Role)) // name the node + names[i] = name + } + haveLoadbalancer := config.ClusterHasImplicitLoadBalancer(cfg) + if haveLoadbalancer { + names = append(names, nodeNamer(constants.ExternalLoadBalancerNodeRoleValue)) + } + genericArgs, err := commonArgs(cfg, networkName, names) + if err != nil { + return nil, err + } + + // only the external LB should reflect the port if we have multiple control planes + apiServerPort := cfg.Networking.APIServerPort + apiServerAddress := cfg.Networking.APIServerAddress + if config.ClusterHasImplicitLoadBalancer(cfg) { + // TODO: picking ports locally is less than ideal with a remote runtime + // (does podman have this?) + // but this is supposed to be an implementation detail and NOT picking + // them breaks host reboot ... + // For now remote podman + multi control plane is not supported + apiServerPort = 0 // replaced with random ports + apiServerAddress = "127.0.0.1" // only the LB needs to be non-local + // only for IPv6 only clusters + if cfg.Networking.IPFamily == config.IPv6Family { + apiServerAddress = "::1" // only the LB needs to be non-local + } + // plan loadbalancer node + name := names[len(names)-1] + createContainerFuncs = append(createContainerFuncs, func() error { + args, err := runArgsForLoadBalancer(cfg, name, genericArgs) + if err != nil { + return err + } + return createContainer(name, args) + }) + } + + // plan normal nodes + for i, node := range cfg.Nodes { + node := node.DeepCopy() // copy so we can modify + name := names[i] + + // fixup relative paths, podman can only handle absolute paths + for i := range node.ExtraMounts { + hostPath := node.ExtraMounts[i].HostPath + absHostPath, err := filepath.Abs(hostPath) + if err != nil { + return nil, errors.Wrapf(err, "unable to resolve absolute path for hostPath: %q", hostPath) + } + node.ExtraMounts[i].HostPath = absHostPath + } + + // plan actual creation based on role + switch node.Role { + case config.ControlPlaneRole: + createContainerFuncs = append(createContainerFuncs, func() error { + node.ExtraPortMappings = append(node.ExtraPortMappings, + config.PortMapping{ + ListenAddress: apiServerAddress, + HostPort: apiServerPort, + ContainerPort: common.APIServerInternalPort, + }, + ) + args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs) + if err != nil { + return err + } + return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args) + }) + case config.WorkerRole: + createContainerFuncs = append(createContainerFuncs, func() error { + args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs) + if err != nil { + return err + } + return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args) + }) + default: + return nil, errors.Errorf("unknown node role: 
%q", node.Role) + } + } + return createContainerFuncs, nil +} + +// commonArgs computes static arguments that apply to all containers +func commonArgs(cfg *config.Cluster, networkName string, nodeNames []string) ([]string, error) { + // standard arguments all nodes containers need, computed once + args := []string{ + "--detach", // run the container detached + "--tty", // allocate a tty for entrypoint logs + "--net", networkName, // attach to its own network + // label the node with the cluster ID + "--label", fmt.Sprintf("%s=%s", clusterLabelKey, cfg.Name), + // specify container implementation to systemd + "-e", "container=podman", + // this is the default in cgroupsv2 but not in v1 + "--cgroupns=private", + } + + // enable IPv6 if necessary + if config.ClusterHasIPv6(cfg) { + args = append(args, "--sysctl=net.ipv6.conf.all.disable_ipv6=0", "--sysctl=net.ipv6.conf.all.forwarding=1") + } + + // pass proxy environment variables + proxyEnv, err := getProxyEnv(cfg, networkName, nodeNames) + if err != nil { + return nil, errors.Wrap(err, "proxy setup error") + } + for key, val := range proxyEnv { + args = append(args, "-e", fmt.Sprintf("%s=%s", key, val)) + } + + // handle Podman on Btrfs or ZFS same as we do with Docker + // https://github.com/kubernetes-sigs/kind/issues/1416#issuecomment-606514724 + if mountDevMapper() { + args = append(args, "--volume", "/dev/mapper:/dev/mapper") + } + + // rootless: use fuse-overlayfs by default + // https://github.com/kubernetes-sigs/kind/issues/2275 + if mountFuse() { + args = append(args, "--device", "/dev/fuse") + } + + if cfg.Networking.DNSSearch != nil { + args = append(args, "-e", "KIND_DNS_SEARCH="+strings.Join(*cfg.Networking.DNSSearch, " ")) + } + + return args, nil +} + +func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, name string, args []string) ([]string, error) { + // Pre-create anonymous volumes to enable specifying mount options + // during container run time + varVolume, err := createAnonymousVolume(name) + if err != nil { + return nil, err + } + + args = append([]string{ + "--hostname", name, // make hostname match container name + // label the node with the role ID + "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, node.Role), + // running containers in a container requires privileged + // NOTE: we could try to replicate this with --cap-add, and use less + // privileges, but this flag also changes some mounts that are necessary + // including some ones podman would otherwise do by default. + // for now this is what we want. in the future we may revisit this. + "--privileged", + // runtime temporary storage + "--tmpfs", "/tmp", // various things depend on working /tmp + "--tmpfs", "/run", // systemd wants a writable /run + // runtime persistent storage + // this ensures that E.G. pods, logs etc. are not on the container + // filesystem, which is not only better for performance, but allows + // running kind in kind for "party tricks" + // (please don't depend on doing this though!) 
+ // also enable default docker volume options + // suid: SUID applications on the volume will be able to change their privilege + // exec: executables on the volume will be able to be executed within the container + // dev: devices on the volume will be able to be used by processes within the container + "--volume", fmt.Sprintf("%s:/var:suid,exec,dev", varVolume), + // some k8s things want to read /lib/modules + "--volume", "/lib/modules:/lib/modules:ro", + // propagate KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER to the entrypoint script + "-e", "KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER", + }, + args..., + ) + + // convert mounts and port mappings to container run args + args = append(args, generateMountBindings(node.ExtraMounts...)...) + mappingArgs, err := generatePortMappings(clusterIPFamily, node.ExtraPortMappings...) + if err != nil { + return nil, err + } + args = append(args, mappingArgs...) + + switch node.Role { + case config.ControlPlaneRole: + args = append(args, "-e", "KUBECONFIG=/etc/kubernetes/admin.conf") + } + + // finally, specify the image to run + _, image := sanitizeImage(node.Image) + return append(args, image), nil +} + +func runArgsForLoadBalancer(cfg *config.Cluster, name string, args []string) ([]string, error) { + args = append([]string{ + "--hostname", name, // make hostname match container name + // label the node with the role ID + "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, constants.ExternalLoadBalancerNodeRoleValue), + }, + args..., + ) + + // load balancer port mapping + mappingArgs, err := generatePortMappings(cfg.Networking.IPFamily, + config.PortMapping{ + ListenAddress: cfg.Networking.APIServerAddress, + HostPort: cfg.Networking.APIServerPort, + ContainerPort: common.APIServerInternalPort, + }, + ) + if err != nil { + return nil, err + } + args = append(args, mappingArgs...) + + // finally, specify the image to run + _, image := sanitizeImage(loadbalancer.Image) + return append(args, image), nil +} + +func getProxyEnv(cfg *config.Cluster, networkName string, nodeNames []string) (map[string]string, error) { + envs := common.GetProxyEnvs(cfg) + // Specifically add the podman network subnets to NO_PROXY if we are using a proxy + if len(envs) > 0 { + // kind default bridge is "kind" + subnets, err := getSubnets(networkName) + if err != nil { + return nil, err + } + noProxyList := append(subnets, envs[common.NOProxy]) + noProxyList = append(noProxyList, nodeNames...) + // Add pod, service and all the cluster nodes' dns names to no_proxy to allow in-cluster communication. + // Note: this is best effort based on the default CoreDNS spec + // https://github.com/kubernetes/dns/blob/master/docs/specification.md + // Any user created pod/service hostnames, namespaces, custom DNS services + // are expected to be no-proxied by the user explicitly.
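+ // e.g. NO_PROXY could end up as "10.89.0.0/24,kind-control-plane,.svc,.svc.cluster,.svc.cluster.local" + // (illustrative subnet and node name, not actual defaults)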
+ + noProxyList = append(noProxyList, ".svc", ".svc.cluster", ".svc.cluster.local") + noProxyJoined := strings.Join(noProxyList, ",") + envs[common.NOProxy] = noProxyJoined + envs[strings.ToLower(common.NOProxy)] = noProxyJoined + } + return envs, nil +} + +type podmanNetworks []struct { + // v4+ + Subnets []struct { + Subnet string `json:"subnet"` + Gateway string `json:"gateway"` + } `json:"subnets"` + // v3 and anything still using CNI/IPAM + Plugins []struct { + Ipam struct { + Ranges [][]struct { + Gateway string `json:"gateway"` + Subnet string `json:"subnet"` + } `json:"ranges"` + } `json:"ipam,omitempty"` + } `json:"plugins"` +} + +func getSubnets(networkName string) ([]string, error) { + cmd := exec.Command("podman", "network", "inspect", networkName) + out, err := exec.Output(cmd) + + if err != nil { + return nil, errors.Wrap(err, "failed to get subnets") + } + + networks := podmanNetworks{} + jsonErr := json.Unmarshal([]byte(out), &networks) + if jsonErr != nil { + return nil, errors.Wrap(jsonErr, "failed to get subnets") + } + subnets := []string{} + for _, network := range networks { + if len(network.Subnets) > 0 { + for _, subnet := range network.Subnets { + subnets = append(subnets, subnet.Subnet) + } + } + if len(network.Plugins) > 0 { + for _, plugin := range network.Plugins { + for _, r := range plugin.Ipam.Ranges { + for _, rr := range r { + subnets = append(subnets, rr.Subnet) + } + } + } + } + } + return subnets, nil +} + +// generateMountBindings converts the mount list to a list of args for podman +// '<HostPath>:<ContainerPath>[:options]', where 'options' +// is a comma-separated list of the following strings: +// 'ro', if the path is read only +// 'Z', if the volume requires SELinux relabeling +func generateMountBindings(mounts ...config.Mount) []string { + args := make([]string, 0, len(mounts)) + for _, m := range mounts { + bind := fmt.Sprintf("%s:%s", m.HostPath, m.ContainerPath) + var attrs []string + if m.Readonly { + attrs = append(attrs, "ro") + } + // Only request relabeling if the pod provides an SELinux context. If the pod + // does not provide an SELinux context relabeling will label the volume with + // the container's randomly allocated MCS label. This would restrict access + // to the volume to the container which mounts it first.
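+ // e.g. a read-only, relabeled bind would render as + // --volume=/host/path:/container/path:ro,Z (illustrative paths)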
+ if m.SelinuxRelabel { + attrs = append(attrs, "Z") + } + switch m.Propagation { + case config.MountPropagationNone: + // noop, private is default + case config.MountPropagationBidirectional: + attrs = append(attrs, "rshared") + case config.MountPropagationHostToContainer: + attrs = append(attrs, "rslave") + default: // Falls back to "private" + } + if len(attrs) > 0 { + bind = fmt.Sprintf("%s:%s", bind, strings.Join(attrs, ",")) + } + args = append(args, fmt.Sprintf("--volume=%s", bind)) + } + return args +} + +// generatePortMappings converts the portMappings list to a list of args for podman +func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings ...config.PortMapping) ([]string, error) { + args := make([]string, 0, len(portMappings)) + for _, pm := range portMappings { + // do provider internal defaulting + // in a future API revision we will handle this at the API level and remove this + if pm.ListenAddress == "" { + switch clusterIPFamily { + case config.IPv4Family, config.DualStackFamily: + pm.ListenAddress = "0.0.0.0" + case config.IPv6Family: + pm.ListenAddress = "::" + default: + return nil, errors.Errorf("unknown cluster IP family: %v", clusterIPFamily) + } + } + if string(pm.Protocol) == "" { + pm.Protocol = config.PortMappingProtocolTCP // TCP is the default + } + + // validate that the provider can handle this binding + switch pm.Protocol { + case config.PortMappingProtocolTCP: + case config.PortMappingProtocolUDP: + case config.PortMappingProtocolSCTP: + default: + return nil, errors.Errorf("unknown port mapping protocol: %v", pm.Protocol) + } + + // get a random port if necessary (port = 0) + hostPort, err := common.PortOrGetFreePort(pm.HostPort, pm.ListenAddress) + if err != nil { + return nil, errors.Wrap(err, "failed to get random host port for port mapping") + } + + // generate the actual mapping arg + protocol := string(pm.Protocol) + hostPortBinding := net.JoinHostPort(pm.ListenAddress, fmt.Sprintf("%d", hostPort)) + // Podman expects empty string instead of 0 to assign a random port + // https://github.com/containers/libpod/blob/master/pkg/spec/ports.go#L68-L69 + if strings.HasSuffix(hostPortBinding, ":0") { + hostPortBinding = strings.TrimSuffix(hostPortBinding, "0") + } + args = append(args, fmt.Sprintf("--publish=%s:%d/%s", hostPortBinding, pm.ContainerPort, strings.ToLower(protocol))) + } + return args, nil +} + +func createContainer(name string, args []string) error { + if err := exec.Command("podman", append([]string{"run", "--name", name}, args...)...).Run(); err != nil { + return err + } + return nil +} + +func createContainerWithWaitUntilSystemdReachesMultiUserSystem(name string, args []string) error { + if err := exec.Command("podman", append([]string{"run", "--name", name}, args...)...).Run(); err != nil { + return err + } + + logCtx, logCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer logCancel() + logCmd := exec.CommandContext(logCtx, "podman", "logs", "-f", name) + return common.WaitUntilLogRegexpMatches(logCtx, logCmd, common.NodeReachedCgroupsReadyRegexp()) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/util.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/util.go new file mode 100644 index 000000000..969f09ccb --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/util.go @@ -0,0 +1,169 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podman + +import ( + "encoding/json" + "fmt" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + + "sigs.k8s.io/kind/pkg/internal/version" +) + +// IsAvailable checks if podman is available in the system +func IsAvailable() bool { + cmd := exec.Command("podman", "-v") + lines, err := exec.OutputLines(cmd) + if err != nil || len(lines) != 1 { + return false + } + return strings.HasPrefix(lines[0], "podman version") +} + +func getPodmanVersion() (*version.Version, error) { + cmd := exec.Command("podman", "--version") + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, err + } + + // output is like `podman version 1.7.1-dev` + if len(lines) != 1 { + return nil, errors.Errorf("podman version should only be one line, got %d", len(lines)) + } + parts := strings.Split(lines[0], " ") + if len(parts) != 3 { + return nil, errors.Errorf("podman --version contents should have 3 parts, got %q", lines[0]) + } + return version.ParseSemantic(parts[2]) +} + +const ( + minSupportedVersion = "1.8.0" +) + +func ensureMinVersion() error { + // ensure that podman version is a compatible version + v, err := getPodmanVersion() + if err != nil { + return errors.Wrap(err, "failed to check podman version") + } + if !v.AtLeast(version.MustParseSemantic(minSupportedVersion)) { + return errors.Errorf("podman version %q is too old, please upgrade to %q or later", v, minSupportedVersion) + } + return nil +} + +// createAnonymousVolume creates a new anonymous volume +// with the specified label=true +// returns the name of the volume created +func createAnonymousVolume(label string) (string, error) { + cmd := exec.Command("podman", + "volume", + "create", + // podman only supports filtering on key during list + // so we use the unique id as key + "--label", fmt.Sprintf("%s=true", label)) + name, err := exec.Output(cmd) + if err != nil { + return "", err + } + return strings.TrimSuffix(string(name), "\n"), nil +} + +// getVolumes gets volume names filtered on specified label +func getVolumes(label string) ([]string, error) { + cmd := exec.Command("podman", + "volume", + "ls", + "--filter", fmt.Sprintf("label=%s", label), + "--quiet") + // `output` from the above command is the names of all volumes, each followed by `\n`. + output, err := exec.Output(cmd) + if err != nil { + return nil, err + } + if string(output) == "" { + // no volumes + return nil, nil + } + // Trim away the last `\n`. + trimmedOutput := strings.TrimSuffix(string(output), "\n") + // Get names of all volumes by splitting via `\n`. + return strings.Split(string(trimmedOutput), "\n"), nil +} + +func deleteVolumes(names []string) error { + args := []string{ + "volume", + "rm", + "--force", + } + args = append(args, names...) + cmd := exec.Command("podman", args...)
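+ // i.e. this runs: podman volume rm --force <name> [<name> ...] + // with the names as returned by getVolumes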
+ return cmd.Run() +} + +// mountDevMapper checks if the podman storage driver is Btrfs, ZFS or devicemapper, +// or if the backing filesystem is Btrfs, XFS or ZFS +func mountDevMapper() bool { + cmd := exec.Command("podman", "info", "--format", "json") + out, err := exec.Output(cmd) + if err != nil { + return false + } + + var pInfo podmanStorageInfo + if err := json.Unmarshal(out, &pInfo); err != nil { + return false + } + + // match docker logic pkg/cluster/internal/providers/docker/util.go + if pInfo.Store.GraphDriverName == "btrfs" || + pInfo.Store.GraphDriverName == "zfs" || + pInfo.Store.GraphDriverName == "devicemapper" || + pInfo.Store.GraphStatus.BackingFilesystem == "btrfs" || + pInfo.Store.GraphStatus.BackingFilesystem == "xfs" || + pInfo.Store.GraphStatus.BackingFilesystem == "zfs" { + return true + } + return false +} + +type podmanStorageInfo struct { + Store struct { + GraphDriverName string `json:"graphDriverName,omitempty"` + GraphStatus struct { + BackingFilesystem string `json:"Backing Filesystem,omitempty"` // e.g. "btrfs" + } `json:"graphStatus"` + } `json:"store"` +} + +// rootless: use fuse-overlayfs by default +// https://github.com/kubernetes-sigs/kind/issues/2275 +func mountFuse() bool { + i, err := info(nil) + if err != nil { + return false + } + if i != nil && i.Rootless { + return true + } + return false +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/provider.go new file mode 100644 index 000000000..cc4270609 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/provider.go @@ -0,0 +1,59 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package providers + +import ( + "sigs.k8s.io/kind/pkg/cluster/nodes" + + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" +) + +// Provider represents a provider of cluster / node infrastructure +// This is an alpha-grade internal API +type Provider interface { + // Provision should create and start the nodes, just short of + // actually starting up Kubernetes, based on the given cluster config + Provision(status *cli.Status, cfg *config.Cluster) error + // ListClusters discovers the clusters that currently have resources + // under this provider + ListClusters() ([]string, error) + // ListNodes returns the nodes under this provider for the given + // cluster name; they may or may not be running correctly + ListNodes(cluster string) ([]nodes.Node, error) + // DeleteNodes deletes the provided list of nodes + // These should be from results previously returned by this provider + // E.G.
by ListNodes() + DeleteNodes([]nodes.Node) error + // GetAPIServerEndpoint returns the host endpoint for the cluster's API server + GetAPIServerEndpoint(cluster string) (string, error) + // GetAPIServerInternalEndpoint returns the internal network endpoint for the cluster's API server + GetAPIServerInternalEndpoint(cluster string) (string, error) + // CollectLogs will populate dir with cluster logs and other debug files + CollectLogs(dir string, nodes []nodes.Node) error + // Info returns the provider info + Info() (*ProviderInfo, error) +} + +// ProviderInfo is the info of the provider +type ProviderInfo struct { + Rootless bool + Cgroup2 bool + SupportsMemoryLimit bool + SupportsPidsLimit bool + SupportsCPUShares bool +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/doc.go new file mode 100644 index 000000000..4cd87b0ec --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package nodes provides a kind specific definition of a cluster node +package nodes diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/types.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/types.go new file mode 100644 index 000000000..a2c1b33b4 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/types.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodes + +import ( + "io" + + "sigs.k8s.io/kind/pkg/exec" +) + +// Node represents a kind cluster node +type Node interface { + // The node should implement exec.Cmder for running commands against the node + // see: sigs.k8s.io/kind/pkg/exec + exec.Cmder + // String should return the node name + String() string // see also: fmt.Stringer + // Role should return the node's role + Role() (string, error) // see also: pkg/cluster/constants + // TODO(bentheelder): should return node addresses more generally + // Possibly remove this method in favor of obtaining this detail with + // exec or from the provider + IP() (ipv4 string, ipv6 string, err error) + // SerialLogs collects the "node" container logs + SerialLogs(writer io.Writer) error +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/doc.go new file mode 100644 index 000000000..b1e72e95a --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package nodeutils contains functionality for Kubernetes-in-Docker nodes +// It mostly exists to break up functionality from sigs.k8s.io/kind/pkg/cluster +package nodeutils diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/roles.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/roles.go new file mode 100644 index 000000000..27deb93cc --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/roles.go @@ -0,0 +1,153 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodeutils + +import ( + "sort" + "strings" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" +) + +// SelectNodesByRole returns a list of nodes with the matching role +// TODO(bentheelder): remove this in favor of specific role select methods +// and avoid the unnecessary error handling +func SelectNodesByRole(allNodes []nodes.Node, role string) ([]nodes.Node, error) { + out := []nodes.Node{} + for _, node := range allNodes { + nodeRole, err := node.Role() + if err != nil { + return nil, err + } + if nodeRole == role { + out = append(out, node) + } + } + return out, nil +} + +// InternalNodes returns the list of container IDs for the "nodes" in the cluster +// that are ~Kubernetes nodes, as opposed to e.g. 
the external loadbalancer for HA +func InternalNodes(allNodes []nodes.Node) ([]nodes.Node, error) { + selectedNodes := []nodes.Node{} + for _, node := range allNodes { + nodeRole, err := node.Role() + if err != nil { + return nil, err + } + if nodeRole == constants.WorkerNodeRoleValue || nodeRole == constants.ControlPlaneNodeRoleValue { + selectedNodes = append(selectedNodes, node) + } + } + return selectedNodes, nil +} + +// ExternalLoadBalancerNode returns a node handle for the external control plane +// loadbalancer node or nil if there isn't one +func ExternalLoadBalancerNode(allNodes []nodes.Node) (nodes.Node, error) { + // identify and validate external load balancer node + loadBalancerNodes, err := SelectNodesByRole( + allNodes, + constants.ExternalLoadBalancerNodeRoleValue, + ) + if err != nil { + return nil, err + } + if len(loadBalancerNodes) < 1 { + return nil, nil + } + if len(loadBalancerNodes) > 1 { + return nil, errors.Errorf( + "unexpected number of %s nodes %d", + constants.ExternalLoadBalancerNodeRoleValue, + len(loadBalancerNodes), + ) + } + return loadBalancerNodes[0], nil +} + +// APIServerEndpointNode selects the node from allNodes which hosts the API Server endpoint +// This should be the control plane node if there is one control plane node, or a LoadBalancer otherwise. +// It returns an error if the node list is invalid (E.G. two control planes and no load balancer) +func APIServerEndpointNode(allNodes []nodes.Node) (nodes.Node, error) { + if n, err := ExternalLoadBalancerNode(allNodes); err != nil { + return nil, errors.Wrap(err, "failed to find api-server endpoint node") + } else if n != nil { + return n, nil + } + n, err := ControlPlaneNodes(allNodes) + if err != nil { + return nil, errors.Wrap(err, "failed to find api-server endpoint node") + } + if len(n) != 1 { + return nil, errors.Errorf("expected one control plane node or a load balancer, not %d and none", len(n)) + } + return n[0], nil +} + +// ControlPlaneNodes returns all control plane nodes such that the first entry +// is the bootstrap control plane node +func ControlPlaneNodes(allNodes []nodes.Node) ([]nodes.Node, error) { + controlPlaneNodes, err := SelectNodesByRole( + allNodes, + constants.ControlPlaneNodeRoleValue, + ) + if err != nil { + return nil, err + } + // pick the first by sorting + // TODO(bentheelder): perhaps in the future we should mark this node + // specially at container creation time + sort.Slice(controlPlaneNodes, func(i, j int) bool { + return strings.Compare(controlPlaneNodes[i].String(), controlPlaneNodes[j].String()) < 0 + }) + return controlPlaneNodes, nil +} + +// BootstrapControlPlaneNode returns a handle to the bootstrap control plane node +// TODO(bentheelder): remove this. 
This node shouldn't be special (fix that first) +func BootstrapControlPlaneNode(allNodes []nodes.Node) (nodes.Node, error) { + controlPlaneNodes, err := ControlPlaneNodes(allNodes) + if err != nil { + return nil, err + } + if len(controlPlaneNodes) < 1 { + return nil, errors.Errorf( + "expected at least one %s node", + constants.ControlPlaneNodeRoleValue, + ) + } + return controlPlaneNodes[0], nil +} + +// SecondaryControlPlaneNodes returns handles to the secondary +// control plane nodes and NOT the bootstrap control plane node +func SecondaryControlPlaneNodes(allNodes []nodes.Node) ([]nodes.Node, error) { + controlPlaneNodes, err := ControlPlaneNodes(allNodes) + if err != nil { + return nil, err + } + if len(controlPlaneNodes) < 1 { + return nil, errors.Errorf( + "expected at least one %s node", + constants.ControlPlaneNodeRoleValue, + ) + } + return controlPlaneNodes[1:], nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/util.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/util.go new file mode 100644 index 000000000..501681a2c --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/util.go @@ -0,0 +1,156 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodeutils + +import ( + "bytes" + "encoding/json" + "io" + "path" + "strings" + + "github.com/pelletier/go-toml" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// KubeVersion returns the Kubernetes version installed on the node +func KubeVersion(n nodes.Node) (version string, err error) { + // grab kubernetes version from the node image + cmd := n.Command("cat", "/kind/version") + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get file") + } + if len(lines) != 1 { + return "", errors.Errorf("file should only be one line, got %d lines", len(lines)) + } + return lines[0], nil +} + +// WriteFile writes content to dest on the node +func WriteFile(n nodes.Node, dest, content string) error { + // create destination directory + err := n.Command("mkdir", "-p", path.Dir(dest)).Run() + if err != nil { + return errors.Wrapf(err, "failed to create directory %s", path.Dir(dest)) + } + + return n.Command("cp", "/dev/stdin", dest).SetStdin(strings.NewReader(content)).Run() +} + +// CopyNodeToNode copies file from a to b +func CopyNodeToNode(a, b nodes.Node, file string) error { + // create destination directory + err := b.Command("mkdir", "-p", path.Dir(file)).Run() + if err != nil { + return errors.Wrapf(err, "failed to create directory %q", path.Dir(file)) + } + + // TODO: experiment with streaming instead to avoid the copy + // for now we only use this for small files so it's not worth the complexity + var buff bytes.Buffer + if err := a.Command("cat", file).SetStdout(&buff).Run(); err != nil { + return errors.Wrapf(err, "failed to read %q from node", file) + } + if err := b.Command("cp", "/dev/stdin", file).SetStdin(&buff).Run(); err != nil { + return errors.Wrapf(err, 
"failed to write %q to node", file) + } + + return nil +} + +// LoadImageArchive loads image onto the node, where image is a Reader over an image archive +func LoadImageArchive(n nodes.Node, image io.Reader) error { + snapshotter, err := getSnapshotter(n) + if err != nil { + return err + } + cmd := n.Command("ctr", "--namespace=k8s.io", "images", "import", "--all-platforms", "--digests", "--snapshotter="+snapshotter, "-").SetStdin(image) + if err := cmd.Run(); err != nil { + return errors.Wrap(err, "failed to load image") + } + return nil +} + +func getSnapshotter(n nodes.Node) (string, error) { + out, err := exec.Output(n.Command("containerd", "config", "dump")) + if err != nil { + return "", errors.Wrap(err, "failed to detect containerd snapshotter") + } + return parseSnapshotter(string(out)) +} + +func parseSnapshotter(config string) (string, error) { + parsed, err := toml.Load(config) + if err != nil { + return "", errors.Wrap(err, "failed to detect containerd snapshotter") + } + snapshotter, ok := parsed.GetPath([]string{"plugins", "io.containerd.grpc.v1.cri", "containerd", "snapshotter"}).(string) + if !ok { + return "", errors.New("failed to detect containerd snapshotter") + } + return snapshotter, nil +} + +// ImageID returns ID of image on the node with the given image name if present +func ImageID(n nodes.Node, image string) (string, error) { + var out bytes.Buffer + if err := n.Command("crictl", "inspecti", image).SetStdout(&out).Run(); err != nil { + return "", err + } + // we only care about the image ID + crictlOut := struct { + Status struct { + ID string `json:"id"` + } `json:"status"` + }{} + if err := json.Unmarshal(out.Bytes(), &crictlOut); err != nil { + return "", err + } + return crictlOut.Status.ID, nil +} + +// ImageTags is used to perform a reverse lookup of the ImageID to list set of available +// RepoTags corresponding to the ImageID in question +func ImageTags(n nodes.Node, imageID string) (map[string]bool, error) { + var out bytes.Buffer + tags := make(map[string]bool, 0) + if err := n.Command("crictl", "inspecti", imageID).SetStdout(&out).Run(); err != nil { + return tags, err + } + crictlOut := struct { + Status struct { + RepoTags []string `json:"repoTags"` + } `json:"status"` + }{} + if err := json.Unmarshal(out.Bytes(), &crictlOut); err != nil { + return tags, err + } + for _, tag := range crictlOut.Status.RepoTags { + tags[tag] = true + } + return tags, nil +} + +// ReTagImage is used to tag an ImageID with a custom tag specified by imageName parameter +func ReTagImage(n nodes.Node, imageID, imageName string) error { + var out bytes.Buffer + return n.Command("ctr", "--namespace=k8s.io", "images", "tag", "--force", imageID, imageName).SetStdout(&out).Run() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go new file mode 100644 index 000000000..3cff17478 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go @@ -0,0 +1,246 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "os" + "path/filepath" + "sort" + + "sigs.k8s.io/kind/pkg/cmd/kind/version" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/log" + + internalcreate "sigs.k8s.io/kind/pkg/cluster/internal/create" + internaldelete "sigs.k8s.io/kind/pkg/cluster/internal/delete" + "sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig" + internalproviders "sigs.k8s.io/kind/pkg/cluster/internal/providers" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/docker" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/podman" +) + +// DefaultName is the default cluster name +const DefaultName = constants.DefaultClusterName + +// defaultName is a helper that, given a name, defaults it if unset +func defaultName(name string) string { + if name == "" { + name = DefaultName + } + return name +} + +// Provider is used to perform cluster operations +type Provider struct { + provider internalproviders.Provider + logger log.Logger +} + +// NewProvider returns a new provider based on the supplied options +func NewProvider(options ...ProviderOption) *Provider { + p := &Provider{ + logger: log.NoopLogger{}, + } + // Ensure we apply the logger options first, while maintaining the order + // otherwise. This way we can trivially init the internal provider with + // the logger. + sort.SliceStable(options, func(i, j int) bool { + _, iIsLogger := options[i].(providerLoggerOption) + _, jIsLogger := options[j].(providerLoggerOption) + return iIsLogger && !jIsLogger + }) + for _, o := range options { + if o != nil { + o.apply(p) + } + } + + // ensure a provider if none was set + // NOTE: depends on logger being set (see sorting above) + if p.provider == nil { + // DetectNodeProvider does not fall back, to allow callers to determine + // this behavior. + // However, for compatibility, if the caller of NewProvider supplied no + // option and we autodetect internally, we default to the docker provider + // as a fallback, to avoid a breaking change for now. + // This may change in the future. + // TODO: consider breaking this API for earlier errors. + providerOpt, _ := DetectNodeProvider() + if providerOpt == nil { + providerOpt = ProviderWithDocker() + } + providerOpt.apply(p) + } + return p +} + +// NoNodeProviderDetectedError indicates that we could not auto-detect an available +// NodeProvider backend on the host +var NoNodeProviderDetectedError = errors.NewWithoutStack("failed to detect any supported node provider") + +// DetectNodeProvider allows callers to autodetect the node provider +// *without* fallback to the default. +// +// Pass the returned ProviderOption to NewProvider to apply the auto-detected Docker +// or Podman option explicitly (in the future there will be more options) +// +// NOTE: The kind *cli* currently also checks `KIND_EXPERIMENTAL_PROVIDER` for "podman" or +// "docker" and, if set, respects it rather than auto-detecting. +// +// This will be replaced with some other mechanism in the future (likely when +// podman support is GA); in the meantime your tool may wish to match this. +// +// In the future when this is not considered experimental, +// that logic will be in a public API as well.
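+// +// A minimal usage sketch (assuming a log.Logger named logger; this example is +// not part of the upstream docs): +// +// opt, err := cluster.DetectNodeProvider() +// if err != nil { +// opt = cluster.ProviderWithDocker() // fall back explicitly +// } +// p := cluster.NewProvider(cluster.ProviderWithLogger(logger), opt)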
+func DetectNodeProvider() (ProviderOption, error) { + // auto-detect based on each node provider's IsAvailable() function + if docker.IsAvailable() { + return ProviderWithDocker(), nil + } + if podman.IsAvailable() { + return ProviderWithPodman(), nil + } + return nil, errors.WithStack(NoNodeProviderDetectedError) +} + +// ProviderOption is an option for configuring a provider +type ProviderOption interface { + apply(p *Provider) +} + +// providerLoggerOption is a trivial ProviderOption adapter +// we use a type specific to logging options so we can handle them first +type providerLoggerOption func(p *Provider) + +func (a providerLoggerOption) apply(p *Provider) { + a(p) +} + +var _ ProviderOption = providerLoggerOption(nil) + +// ProviderWithLogger configures the provider to use the given logger +func ProviderWithLogger(logger log.Logger) ProviderOption { + return providerLoggerOption(func(p *Provider) { + p.logger = logger + }) +} + +// providerRuntimeOption is a trivial ProviderOption adapter +// we use a type specific to runtime options, distinct from the logging options +// which are handled first +type providerRuntimeOption func(p *Provider) + +func (a providerRuntimeOption) apply(p *Provider) { + a(p) +} + +var _ ProviderOption = providerRuntimeOption(nil) + +// ProviderWithDocker configures the provider to use docker runtime +func ProviderWithDocker() ProviderOption { + return providerRuntimeOption(func(p *Provider) { + p.provider = docker.NewProvider(p.logger) + }) +} + +// ProviderWithPodman configures the provider to use podman runtime +func ProviderWithPodman() ProviderOption { + return providerRuntimeOption(func(p *Provider) { + p.provider = podman.NewProvider(p.logger) + }) +} + +// Create provisions and starts a kubernetes-in-docker cluster +func (p *Provider) Create(name string, options ...CreateOption) error { + // apply options + opts := &internalcreate.ClusterOptions{ + NameOverride: name, + } + for _, o := range options { + if err := o.apply(opts); err != nil { + return err + } + } + return internalcreate.Cluster(p.logger, p.provider, opts) +} + +// Delete tears down a kubernetes-in-docker cluster +func (p *Provider) Delete(name, explicitKubeconfigPath string) error { + return internaldelete.Cluster(p.logger, p.provider, defaultName(name), explicitKubeconfigPath) +} + +// List returns a list of clusters for which nodes exist +func (p *Provider) List() ([]string, error) { + return p.provider.ListClusters() +} + +// KubeConfig returns the KUBECONFIG for the cluster +// If internal is true, this will contain the internal IP etc. +// If internal is false, this will contain the host IP etc. +func (p *Provider) KubeConfig(name string, internal bool) (string, error) { + return kubeconfig.Get(p.provider, defaultName(name), !internal) +} + +// ExportKubeConfig exports the KUBECONFIG for the cluster, merging +// it into the selected file, following the rules from +// https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#config +// where explicitPath is the --kubeconfig value.
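+// e.g. ExportKubeConfig("kind", "", false) would merge the cluster's external +// endpoint config into the default kubeconfig location (illustrative call).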
+func (p *Provider) ExportKubeConfig(name string, explicitPath string, internal bool) error { + return kubeconfig.Export(p.provider, defaultName(name), explicitPath, !internal) +} + +// ListNodes returns the list of container IDs for the "nodes" in the cluster +func (p *Provider) ListNodes(name string) ([]nodes.Node, error) { + return p.provider.ListNodes(defaultName(name)) +} + +// ListInternalNodes returns the list of container IDs for the "nodes" in the cluster +// that are not external +func (p *Provider) ListInternalNodes(name string) ([]nodes.Node, error) { + n, err := p.provider.ListNodes(name) + if err != nil { + return nil, err + } + return nodeutils.InternalNodes(n) +} + +// CollectLogs will populate dir with cluster logs and other debug files +func (p *Provider) CollectLogs(name, dir string) error { + // TODO: should use ListNodes and Collect should handle nodes differently + // based on role ... + n, err := p.ListInternalNodes(name) + if err != nil { + return err + } + // ensure directory + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return errors.Wrap(err, "failed to create logs directory") + } + // write kind version + if err := os.WriteFile( + filepath.Join(dir, "kind-version.txt"), + []byte(version.DisplayVersion()), + 0666, // match os.Create + ); err != nil { + return errors.Wrap(err, "failed to write kind-version.txt") + } + // collect and write cluster logs + return p.provider.CollectLogs(dir, n) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cmd/doc.go b/vendor/sigs.k8s.io/kind/pkg/cmd/doc.go new file mode 100644 index 000000000..ae20f68a7 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cmd/doc.go @@ -0,0 +1,15 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cmd provides helpers used by kind's commands / cli +package cmd diff --git a/vendor/sigs.k8s.io/kind/pkg/cmd/iostreams.go b/vendor/sigs.k8s.io/kind/pkg/cmd/iostreams.go new file mode 100644 index 000000000..2903235de --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cmd/iostreams.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "io" + "os" +) + +// IOStreams provides the standard names for iostreams. +// This is useful for embedding and for unit testing. 
+// Inconsistent and different names make it hard to read and review code +// This is based on cli-runtime, but just the nice type without the dependency +type IOStreams struct { + // In is the input stream; think os.Stdin + In io.Reader + // Out is the output stream; think os.Stdout + Out io.Writer + // ErrOut is the error stream; think os.Stderr + ErrOut io.Writer +} + +// StandardIOStreams returns an IOStreams from os.Stdin, os.Stdout and os.Stderr +func StandardIOStreams() IOStreams { + return IOStreams{ + In: os.Stdin, + Out: os.Stdout, + ErrOut: os.Stderr, + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go b/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go new file mode 100644 index 000000000..9f010fdda --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go @@ -0,0 +1,97 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version implements the `version` command +package version + +import ( + "fmt" + "runtime" + + "github.com/spf13/cobra" + + "sigs.k8s.io/kind/pkg/cmd" + "sigs.k8s.io/kind/pkg/log" +) + +// Version returns the kind CLI Semantic Version +func Version() string { + v := versionCore + // add pre-release version info if we have it + if versionPreRelease != "" { + v += "-" + versionPreRelease + // If gitCommitCount was set, add to the pre-release version + if gitCommitCount != "" { + v += "." + gitCommitCount + } + // if commit was set, add the "+" and short commit as build metadata + // we only do this for pre-release versions + if gitCommit != "" { + // NOTE: use 14 character short hash, like Kubernetes + v += "+" + truncate(gitCommit, 14) + } + } + return v +} + +// DisplayVersion is Version(), display formatted; this is what the version +// subcommand prints +func DisplayVersion() string { + return "kind v" + Version() + " " + runtime.Version() + " " + runtime.GOOS + "/" + runtime.GOARCH +} + +// versionCore is the core portion of the kind CLI version per Semantic Versioning 2.0.0 +const versionCore = "0.20.0" + +// versionPreRelease is the base pre-release portion of the kind CLI version per +// Semantic Versioning 2.0.0 +const versionPreRelease = "" + +// gitCommitCount counts the commits since the last release. +// It is injected at build time. +var gitCommitCount = "" + +// gitCommit is the commit used to build the kind binary, if available. +// It is injected at build time.
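+// (commonly injected with something like +// go build -ldflags "-X sigs.k8s.io/kind/pkg/cmd/kind/version.gitCommit=$(git rev-parse HEAD)"; +// illustrative, the real flags live in kind's build scripts)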
+var gitCommit = "" + +// NewCommand returns a new cobra.Command for version +func NewCommand(logger log.Logger, streams cmd.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Args: cobra.NoArgs, + Use: "version", + Short: "Prints the kind CLI version", + Long: "Prints the kind CLI version", + RunE: func(cmd *cobra.Command, args []string) error { + if logger.V(0).Enabled() { + // if not -q / --quiet, show lots of info + fmt.Fprintln(streams.Out, DisplayVersion()) + } else { + // otherwise only show semver + fmt.Fprintln(streams.Out, Version()) + } + return nil + }, + } + return cmd +} + +func truncate(s string, maxLen int) string { + if len(s) < maxLen { + return s + } + return s[:maxLen] +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cmd/logger.go b/vendor/sigs.k8s.io/kind/pkg/cmd/logger.go new file mode 100644 index 000000000..eaf0f944c --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cmd/logger.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "io" + "os" + + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/internal/cli" + "sigs.k8s.io/kind/pkg/internal/env" +) + +// NewLogger returns the standard logger used by the kind CLI +// This logger writes to os.Stderr +func NewLogger() log.Logger { + var writer io.Writer = os.Stderr + if env.IsSmartTerminal(writer) { + writer = cli.NewSpinner(writer) + } + return cli.NewLogger(writer, 0) +} + +// ColorEnabled returns true if color is enabled for the logger +// this should be used to control output +func ColorEnabled(logger log.Logger) bool { + type maybeColorer interface { + ColorEnabled() bool + } + v, ok := logger.(maybeColorer) + return ok && v.ColorEnabled() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/aggregate.go b/vendor/sigs.k8s.io/kind/pkg/errors/aggregate.go new file mode 100644 index 000000000..5258cd2e1 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/errors/aggregate.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package errors + +// NewAggregate is a k8s.io/apimachinery/pkg/util/errors.NewAggregate compatible wrapper; +// note that it returns a StackTrace-wrapped Aggregate +// that has been flattened and reduced +func NewAggregate(errlist []error) error { + return WithStack( + reduce( + flatten( + newAggregate(errlist), + ), + ), + ) +} + +// Errors returns the deepest Aggregate in a Cause chain +func Errors(err error) []error { + var errors Aggregate + for { + if v, ok := err.(Aggregate); ok { + errors = v + } + if causerErr, ok := err.(Causer); ok { + err = causerErr.Cause() + } else { + break + } + } + if errors != nil { + return errors.Errors() + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/aggregate_forked.go b/vendor/sigs.k8s.io/kind/pkg/errors/aggregate_forked.go new file mode 100644 index 000000000..3e9ec30b4 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/errors/aggregate_forked.go @@ -0,0 +1,167 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "errors" + + "sigs.k8s.io/kind/pkg/internal/sets" +) + +/* + The contents of this file are lightly forked from k8s.io/apimachinery/pkg/util/errors + Forking makes kind easier to import, and this code is stable. + + Currently the only source changes are renaming some methods so as to not + export them. +*/ + +// Aggregate represents an object that contains multiple errors, but does not +// necessarily have singular semantic meaning. +// The aggregate can be used with `errors.Is()` to check for the occurrence of +// a specific error type. +// Errors.As() is not supported, because the caller presumably cares about a +// specific error of potentially multiple that match the given type. +// +// NOTE: this type is originally from k8s.io/apimachinery/pkg/util/errors.Aggregate +// Since it is an interface, you can use the implementing types interchangeably +type Aggregate interface { + error + Errors() []error + Is(error) bool +} + +func newAggregate(errlist []error) Aggregate { + if len(errlist) == 0 { + return nil + } + // In case the input error list contains nils + var errs []error + for _, e := range errlist { + if e != nil { + errs = append(errs, e) + } + } + if len(errs) == 0 { + return nil + } + return aggregate(errs) +} + +// flatten takes an Aggregate, which may hold other Aggregates in arbitrary +// nesting, and flattens them all into a single Aggregate, recursively. +func flatten(agg Aggregate) Aggregate { + result := []error{} + if agg == nil { + return nil + } + for _, err := range agg.Errors() { + if a, ok := err.(Aggregate); ok { + r := flatten(a) + if r != nil { + result = append(result, r.Errors()...) + } + } else { + if err != nil { + result = append(result, err) + } + } + } + return newAggregate(result) +} + +// reduce will return err or, if err is an Aggregate and only has one item, +// the first item in the aggregate.
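+// e.g. reduce over an aggregate holding a single error errA yields errA itself, +// and an aggregate holding no errors reduces to nil (illustrative).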
+func reduce(err error) error { + if agg, ok := err.(Aggregate); ok && err != nil { + switch len(agg.Errors()) { + case 1: + return agg.Errors()[0] + case 0: + return nil + } + } + return err +} + +// This helper implements the error and Errors interfaces. Keeping it private +// prevents people from making an aggregate of 0 errors, which is not +// an error, but does satisfy the error interface. +type aggregate []error + +// Error is part of the error interface. +func (agg aggregate) Error() string { + if len(agg) == 0 { + // This should never happen, really. + return "" + } + if len(agg) == 1 { + return agg[0].Error() + } + seenerrs := sets.NewString() + result := "" + agg.visit(func(err error) bool { + msg := err.Error() + if seenerrs.Has(msg) { + return false + } + seenerrs.Insert(msg) + if len(seenerrs) > 1 { + result += ", " + } + result += msg + return false + }) + if len(seenerrs) == 1 { + return result + } + return "[" + result + "]" +} + +func (agg aggregate) Is(target error) bool { + return agg.visit(func(err error) bool { + return errors.Is(err, target) + }) +} + +func (agg aggregate) visit(f func(err error) bool) bool { + for _, err := range agg { + switch err := err.(type) { + case aggregate: + if match := err.visit(f); match { + return match + } + case Aggregate: + for _, nestedErr := range err.Errors() { + if match := f(nestedErr); match { + return match + } + } + default: + if match := f(err); match { + return match + } + } + } + + return false +} + +// Errors is part of the Aggregate interface. +func (agg aggregate) Errors() []error { + return []error(agg) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/concurrent.go b/vendor/sigs.k8s.io/kind/pkg/errors/concurrent.go new file mode 100644 index 000000000..1d90ad197 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/errors/concurrent.go @@ -0,0 +1,69 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package errors
+
+import (
+	"sync"
+)
+
+// UntilErrorConcurrent runs all funcs in separate goroutines, returning the
+// first non-nil error returned from funcs, or nil if all funcs return nil
+func UntilErrorConcurrent(funcs []func() error) error {
+	errCh := make(chan error, len(funcs))
+	for _, f := range funcs {
+		f := f // capture f
+		go func() {
+			errCh <- f()
+		}()
+	}
+	for i := 0; i < len(funcs); i++ {
+		if err := <-errCh; err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// AggregateConcurrent runs funcs concurrently, returning an aggregate error (see NewAggregate) if more than one error occurs
+func AggregateConcurrent(funcs []func() error) error {
+	// run all funcs concurrently
+	ch := make(chan error, len(funcs))
+	var wg sync.WaitGroup
+	for _, f := range funcs {
+		f := f // capture f
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			ch <- f()
+		}()
+	}
+	wg.Wait()
+	close(ch)
+	// collect up and return errors
+	errs := []error{}
+	for err := range ch {
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+	if len(errs) > 1 {
+		return NewAggregate(errs)
+	} else if len(errs) == 1 {
+		return errs[0]
+	}
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/doc.go b/vendor/sigs.k8s.io/kind/pkg/errors/doc.go
new file mode 100644
index 000000000..c93a68db3
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/errors/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package errors provides common utilities for dealing with errors
+package errors
diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/errors.go b/vendor/sigs.k8s.io/kind/pkg/errors/errors.go
new file mode 100644
index 000000000..98bc47bf2
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/errors/errors.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import (
+	stderrors "errors"
+
+	pkgerrors "github.com/pkg/errors"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+	return pkgerrors.New(message)
+}
+
+// NewWithoutStack is like New but does NOT wrap with a stack trace
+// This is useful for exported errors
+func NewWithoutStack(message string) error {
+	return stderrors.New(message)
+}
+
+// Errorf formats according to a format specifier and returns the string as a
+// value that satisfies error. Errorf also records the stack trace at the
+// point it was called.
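+//
+// For illustration, a typical call might be (hypothetical message and args):
+//
+//	return Errorf("invalid node role %q", role)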
+func Errorf(format string, args ...interface{}) error { + return pkgerrors.Errorf(format, args...) +} + +// Wrap returns an error annotating err with a stack trace at the point Wrap +// is called, and the supplied message. If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + return pkgerrors.Wrap(err, message) +} + +// Wrapf returns an error annotating err with a stack trace at the point Wrapf +// is called, and the format specifier. If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + return pkgerrors.Wrapf(err, format, args...) +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + return pkgerrors.WithStack(err) +} + +// Causer is an interface to github.com/pkg/errors error's Cause() wrapping +type Causer interface { + // Cause returns the underlying error + Cause() error +} + +// StackTracer is an interface to github.com/pkg/errors error's StackTrace() +type StackTracer interface { + // StackTrace returns the StackTrace ... + // TODO: return our own type instead? + // https://github.com/pkg/errors#roadmap + StackTrace() pkgerrors.StackTrace +} + +// StackTrace returns the deepest StackTrace in a Cause chain +// https://github.com/pkg/errors/issues/173 +func StackTrace(err error) pkgerrors.StackTrace { + var stackErr error + for { + if _, ok := err.(StackTracer); ok { + stackErr = err + } + if causerErr, ok := err.(Causer); ok { + err = causerErr.Cause() + } else { + break + } + } + if stackErr != nil { + return stackErr.(StackTracer).StackTrace() + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/default.go b/vendor/sigs.k8s.io/kind/pkg/exec/default.go new file mode 100644 index 000000000..98a215cdf --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/exec/default.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import "context" + +// DefaultCmder is a LocalCmder instance used for convenience, packages +// originally using os/exec.Command can instead use pkg/kind/exec.Command +// which forwards to this instance +// TODO(bentheelder): swap this for testing +// TODO(bentheelder): consider not using a global for this :^) +var DefaultCmder = &LocalCmder{} + +// Command is a convenience wrapper over DefaultCmder.Command +func Command(command string, args ...string) Cmd { + return DefaultCmder.Command(command, args...) +} + +// CommandContext is a convenience wrapper over DefaultCmder.CommandContext +func CommandContext(ctx context.Context, command string, args ...string) Cmd { + return DefaultCmder.CommandContext(ctx, command, args...) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/doc.go b/vendor/sigs.k8s.io/kind/pkg/exec/doc.go new file mode 100644 index 000000000..68214779b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/exec/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package exec contains an interface for executing commands, along with helpers
+// TODO(bentheelder): add standardized timeout functionality & a default timeout
+// so that commands cannot hang indefinitely (!)
+package exec
diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/helpers.go b/vendor/sigs.k8s.io/kind/pkg/exec/helpers.go
new file mode 100644
index 000000000..e579589e8
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/exec/helpers.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/alessio/shellescape"
+
+	"sigs.k8s.io/kind/pkg/errors"
+)
+
+// PrettyCommand takes arguments identical to Cmder.Command;
+// it returns a pretty-printed command that could be pasted into a shell
+func PrettyCommand(name string, args ...string) string {
+	var out strings.Builder
+	out.WriteString(shellescape.Quote(name))
+	for _, arg := range args {
+		out.WriteByte(' ')
+		out.WriteString(shellescape.Quote(arg))
+	}
+	return out.String()
+}
+
+// RunErrorForError returns a RunError if the error contains a RunError.
+// Otherwise it returns nil +func RunErrorForError(err error) *RunError { + var runError *RunError + for { + if rErr, ok := err.(*RunError); ok { + runError = rErr + } + if causerErr, ok := err.(errors.Causer); ok { + err = causerErr.Cause() + } else { + break + } + } + return runError +} + +// CombinedOutputLines is like os/exec's cmd.CombinedOutput(), +// but over our Cmd interface, and instead of returning the byte buffer of +// stderr + stdout, it scans these for lines and returns a slice of output lines +func CombinedOutputLines(cmd Cmd) (lines []string, err error) { + var buff bytes.Buffer + cmd.SetStdout(&buff) + cmd.SetStderr(&buff) + err = cmd.Run() + scanner := bufio.NewScanner(&buff) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + return lines, err +} + +// OutputLines is like os/exec's cmd.Output(), +// but over our Cmd interface, and instead of returning the byte buffer of +// stdout, it scans these for lines and returns a slice of output lines +func OutputLines(cmd Cmd) (lines []string, err error) { + var buff bytes.Buffer + cmd.SetStdout(&buff) + err = cmd.Run() + scanner := bufio.NewScanner(&buff) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + return lines, err +} + +// Output is like os/exec's cmd.Output, but over our Cmd interface +func Output(cmd Cmd) ([]byte, error) { + var buff bytes.Buffer + cmd.SetStdout(&buff) + err := cmd.Run() + return buff.Bytes(), err +} + +// InheritOutput sets cmd's output to write to the current process's stdout and stderr +func InheritOutput(cmd Cmd) Cmd { + cmd.SetStderr(os.Stderr) + cmd.SetStdout(os.Stdout) + return cmd +} + +// RunWithStdoutReader runs cmd with stdout piped to readerFunc +func RunWithStdoutReader(cmd Cmd, readerFunc func(io.Reader) error) error { + pr, pw, err := os.Pipe() + if err != nil { + return err + } + cmd.SetStdout(pw) + + return errors.AggregateConcurrent([]func() error{ + func() error { + defer pr.Close() + return readerFunc(pr) + }, + func() error { + defer pw.Close() + return cmd.Run() + }, + }) +} + +// RunWithStdinWriter runs cmd with writerFunc piped to stdin +func RunWithStdinWriter(cmd Cmd, writerFunc func(io.Writer) error) error { + pr, pw, err := os.Pipe() + if err != nil { + return err + } + cmd.SetStdin(pr) + + return errors.AggregateConcurrent([]func() error{ + func() error { + defer pw.Close() + return writerFunc(pw) + }, + func() error { + defer pr.Close() + return cmd.Run() + }, + }) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/local.go b/vendor/sigs.k8s.io/kind/pkg/exec/local.go new file mode 100644 index 000000000..65bb73c9a --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/exec/local.go @@ -0,0 +1,157 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package exec
+
+import (
+	"bytes"
+	"context"
+	"io"
+	osexec "os/exec"
+	"sync"
+
+	"sigs.k8s.io/kind/pkg/errors"
+)
+
+// LocalCmd wraps os/exec.Cmd, implementing the kind/pkg/exec.Cmd interface
+type LocalCmd struct {
+	*osexec.Cmd
+}
+
+var _ Cmd = &LocalCmd{}
+
+// LocalCmder is a factory for LocalCmd, implementing Cmder
+type LocalCmder struct{}
+
+var _ Cmder = &LocalCmder{}
+
+// Command returns a new exec.Cmd backed by Cmd
+func (c *LocalCmder) Command(name string, arg ...string) Cmd {
+	return &LocalCmd{
+		Cmd: osexec.Command(name, arg...),
+	}
+}
+
+// CommandContext is like Command but includes a context
+func (c *LocalCmder) CommandContext(ctx context.Context, name string, arg ...string) Cmd {
+	return &LocalCmd{
+		Cmd: osexec.CommandContext(ctx, name, arg...),
+	}
+}
+
+// SetEnv sets env
+func (cmd *LocalCmd) SetEnv(env ...string) Cmd {
+	cmd.Env = env
+	return cmd
+}
+
+// SetStdin sets stdin
+func (cmd *LocalCmd) SetStdin(r io.Reader) Cmd {
+	cmd.Stdin = r
+	return cmd
+}
+
+// SetStdout sets stdout
+func (cmd *LocalCmd) SetStdout(w io.Writer) Cmd {
+	cmd.Stdout = w
+	return cmd
+}
+
+// SetStderr sets stderr
+func (cmd *LocalCmd) SetStderr(w io.Writer) Cmd {
+	cmd.Stderr = w
+	return cmd
+}
+
+// Run runs the command
+// If the returned error is non-nil, it should be of type *RunError
+func (cmd *LocalCmd) Run() error {
+	// Background:
+	// Go's stdlib will set up and use a shared fd when cmd.Stderr == cmd.Stdout
+	// In any other case, it will use different fds, which will involve
+	// two different io.Copy goroutines writing to cmd.Stderr and cmd.Stdout
+	//
+	// Given this, we must synchronize capturing the output to a buffer
+	// IFF ! interfaceEqual(cmd.Stderr, cmd.Stdout)
+	var combinedOutput bytes.Buffer
+	var combinedOutputWriter io.Writer = &combinedOutput
+	if cmd.Stdout == nil && cmd.Stderr == nil {
+		// Case 1: If stdout and stderr are nil, we can just use the buffer
+		// The buffer will be == and Go will use one fd / goroutine
+		cmd.Stdout = combinedOutputWriter
+		cmd.Stderr = combinedOutputWriter
+	} else if interfaceEqual(cmd.Stdout, cmd.Stderr) {
+		// Case 2: If cmd.Stdout == cmd.Stderr go will still share the fd,
+		// but we need to wrap with a MultiWriter to respect the other writer
+		// and our buffer.
+		// The MultiWriter will be == and Go will use one fd / goroutine
+		cmd.Stdout = io.MultiWriter(cmd.Stdout, combinedOutputWriter)
+		cmd.Stderr = cmd.Stdout
+	} else {
+		// Case 3: If cmd.Stdout != cmd.Stderr, we need to synchronize the
+		// combined output writer.
+		// Go will use different fds / write routines for stdout and stderr
+		combinedOutputWriter = &mutexWriter{
+			writer: &combinedOutput,
+		}
+		// wrap writers if non-nil
+		if cmd.Stdout != nil {
+			cmd.Stdout = io.MultiWriter(cmd.Stdout, combinedOutputWriter)
+		} else {
+			cmd.Stdout = combinedOutputWriter
+		}
+		if cmd.Stderr != nil {
+			cmd.Stderr = io.MultiWriter(cmd.Stderr, combinedOutputWriter)
+		} else {
+			cmd.Stderr = combinedOutputWriter
+		}
+	}
+	// TODO: should be in the caller or logger should be injected somehow ...
+	if err := cmd.Cmd.Run(); err != nil {
+		return errors.WithStack(&RunError{
+			Command: cmd.Args,
+			Output:  combinedOutput.Bytes(),
+			Inner:   err,
+		})
+	}
+	return nil
+}
+
+// interfaceEqual protects against panics from doing equality tests on
+// two interfaces with non-comparable underlying types.
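+// For example, comparing two interfaces that both hold func values would
+// panic at runtime (func types are not comparable in Go); the deferred
+// recover below turns that panic into a false result.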
+// This helper is borrowed from the go stdlib in os/exec
+// Note that the recover will only happen if a is not comparable to b,
+// in which case we'll return false
+// We've lightly modified this to pass errcheck (explicitly ignoring recover)
+func interfaceEqual(a, b interface{}) bool {
+	defer func() {
+		_ = recover()
+	}()
+	return a == b
+}
+
+// mutexWriter is a simple synchronized wrapper around an io.Writer
+type mutexWriter struct {
+	writer io.Writer
+	mu     sync.Mutex
+}
+
+func (m *mutexWriter) Write(b []byte) (int, error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	n, err := m.writer.Write(b)
+	return n, err
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/types.go b/vendor/sigs.k8s.io/kind/pkg/exec/types.go
new file mode 100644
index 000000000..4ce60431e
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/exec/types.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"context"
+	"fmt"
+	"io"
+)
+
+// Cmd abstracts over running a command somewhere; this is useful for testing
+type Cmd interface {
+	// Run executes the command (like os/exec.Cmd.Run); it should return
+	// a *RunError if there is any error
+	Run() error
+	// Each entry should be of the form "key=value"
+	SetEnv(...string) Cmd
+	SetStdin(io.Reader) Cmd
+	SetStdout(io.Writer) Cmd
+	SetStderr(io.Writer) Cmd
+}
+
+// Cmder abstracts over creating commands
+type Cmder interface {
+	// command, args..., just like os/exec.Cmd
+	Command(string, ...string) Cmd
+	CommandContext(context.Context, string, ...string) Cmd
+}
+
+// RunError represents an error running a Cmd
+type RunError struct {
+	Command []string // [Name Args...]
+	Output  []byte   // Captured Stdout / Stderr of the command
+	Inner   error    // Underlying error if any
+}
+
+var _ error = &RunError{}
+
+func (e *RunError) Error() string {
+	// TODO(BenTheElder): implement formatter, and show output for %+v ?
+	return fmt.Sprintf("command \"%s\" failed with error: %v", e.PrettyCommand(), e.Inner)
+}
+
+// PrettyCommand pretty prints the command in a way that could be pasted
+// into a shell
+func (e *RunError) PrettyCommand() string {
+	return PrettyCommand(e.Command[0], e.Command[1:]...)
+}
+
+// Cause mimics github.com/pkg/errors's Cause pattern for errors
+func (e *RunError) Cause() error {
+	if e.Inner != nil {
+		return e.Inner
+	}
+	return e
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/fs/fs.go b/vendor/sigs.k8s.io/kind/pkg/fs/fs.go
new file mode 100644
index 000000000..7fb4eae33
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/fs/fs.go
@@ -0,0 +1,158 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fs contains utilities for interacting with the host filesystem
+// in a docker friendly way
+// TODO(bentheelder): this should be internal
+package fs
+
+import (
+	"io"
+	"os"
+	"path"
+	"path/filepath"
+	"runtime"
+	"strings"
+)
+
+// TempDir is like os.MkdirTemp, but more docker friendly
+func TempDir(dir, prefix string) (name string, err error) {
+	// create a tempdir as normal
+	name, err = os.MkdirTemp(dir, prefix)
+	if err != nil {
+		return "", err
+	}
+	// on macOS $TMPDIR is typically /var/..., which is not mountable
+	// /private/var/... is the mountable equivalent
+	if runtime.GOOS == "darwin" && strings.HasPrefix(name, "/var/") {
+		name = filepath.Join("/private", name)
+	}
+	return name, nil
+}
+
+// IsAbs is like filepath.IsAbs, but also considers posix absolute paths
+// to be absolute even when filepath.IsAbs would not.
+// This fixes the case of posix paths on Windows
+func IsAbs(hostPath string) bool {
+	return path.IsAbs(hostPath) || filepath.IsAbs(hostPath)
+}
+
+// Copy recursively copies directories, symlinks, and files from src to dst
+// Copy will make dirs as necessary, and keep file modes
+// Symlinks will be dereferenced similarly to `cp -r src dst`
+func Copy(src, dst string) error {
+	// get source info
+	info, err := os.Lstat(src)
+	if err != nil {
+		return err
+	}
+	// make sure dest dir exists
+	if err := os.MkdirAll(filepath.Dir(dst), os.ModePerm); err != nil {
+		return err
+	}
+	// do real copy work
+	return copy(src, dst, info)
+}
+
+func copy(src, dst string, info os.FileInfo) error {
+	if info.Mode()&os.ModeSymlink != 0 {
+		return copySymlink(src, dst)
+	}
+	if info.IsDir() {
+		return copyDir(src, dst, info)
+	}
+	return copyFile(src, dst, info)
+}
+
+// CopyFile copies a file from src to dst
+func CopyFile(src, dst string) (err error) {
+	// get source information
+	info, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	return copyFile(src, dst, info)
+}
+
+// copyFile uses a named error return so that the deferred Close below can
+// surface a close error that would otherwise be silently dropped
+func copyFile(src, dst string, info os.FileInfo) (err error) {
+	// open src for reading
+	in, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer in.Close()
+	// create dst file
+	// this is like f, err := os.Create(dst); os.Chmod(f.Name(), src.Mode())
+	out, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, info.Mode())
+	if err != nil {
+		return err
+	}
+	// make sure we close the file
+	defer func() {
+		closeErr := out.Close()
+		// if we weren't returning an error
+		if err == nil {
+			err = closeErr
+		}
+	}()
+	// actually copy
+	if _, err = io.Copy(out, in); err != nil {
+		return err
+	}
+	err = out.Sync()
+	return err
+}
+
+// copySymlink dereferences and then copies a symlink
+func copySymlink(src, dst string) error {
+	// read through the symlink
+	realSrc, err := filepath.EvalSymlinks(src)
+	if err != nil {
+		return err
+	}
+	info, err := os.Lstat(realSrc)
+	if err != nil {
+		return err
+	}
+	// copy the underlying contents
+	return copy(realSrc, dst, info)
+}
+
+func copyDir(src, dst string, info os.FileInfo) error {
+	// make sure the target dir exists
+	if err := os.MkdirAll(dst, info.Mode()); err != nil {
+		return err
+	}
+	// copy every source dir entry
+
entries, err := os.ReadDir(src) + if err != nil { + return err + } + for _, entry := range entries { + entrySrc := filepath.Join(src, entry.Name()) + entryDst := filepath.Join(dst, entry.Name()) + fileInfo, err := entry.Info() + if err != nil { + return err + } + if err := copy(entrySrc, entryDst, fileInfo); err != nil { + return err + } + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/cluster_util.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/cluster_util.go new file mode 100644 index 000000000..79d3387fa --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/cluster_util.go @@ -0,0 +1,34 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +// ClusterHasIPv6 returns true if the cluster should have IPv6 enabled due to either +// being IPv6 cluster family or Dual Stack +func ClusterHasIPv6(c *Cluster) bool { + return c.Networking.IPFamily == IPv6Family || c.Networking.IPFamily == DualStackFamily +} + +// ClusterHasImplicitLoadBalancer returns true if this cluster has an implicit api-server LoadBalancer +func ClusterHasImplicitLoadBalancer(c *Cluster) bool { + controlPlanes := 0 + for _, node := range c.Nodes { + if node.Role == ControlPlaneRole { + controlPlanes++ + } + } + return controlPlanes > 1 +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/convert_v1alpha4.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/convert_v1alpha4.go new file mode 100644 index 000000000..2df4b7513 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/convert_v1alpha4.go @@ -0,0 +1,104 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + v1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" +) + +// Convertv1alpha4 converts a v1alpha4 cluster to a cluster at the internal API version +func Convertv1alpha4(in *v1alpha4.Cluster) *Cluster { + in = in.DeepCopy() // deep copy first to avoid touching the original + out := &Cluster{ + Name: in.Name, + Nodes: make([]Node, len(in.Nodes)), + FeatureGates: in.FeatureGates, + RuntimeConfig: in.RuntimeConfig, + KubeadmConfigPatches: in.KubeadmConfigPatches, + KubeadmConfigPatchesJSON6902: make([]PatchJSON6902, len(in.KubeadmConfigPatchesJSON6902)), + ContainerdConfigPatches: in.ContainerdConfigPatches, + ContainerdConfigPatchesJSON6902: in.ContainerdConfigPatchesJSON6902, + } + + for i := range in.Nodes { + convertv1alpha4Node(&in.Nodes[i], &out.Nodes[i]) + } + + convertv1alpha4Networking(&in.Networking, &out.Networking) + + for i := range in.KubeadmConfigPatchesJSON6902 { + convertv1alpha4PatchJSON6902(&in.KubeadmConfigPatchesJSON6902[i], &out.KubeadmConfigPatchesJSON6902[i]) + } + + return out +} + +func convertv1alpha4Node(in *v1alpha4.Node, out *Node) { + out.Role = NodeRole(in.Role) + out.Image = in.Image + + out.Labels = in.Labels + out.KubeadmConfigPatches = in.KubeadmConfigPatches + out.ExtraMounts = make([]Mount, len(in.ExtraMounts)) + out.ExtraPortMappings = make([]PortMapping, len(in.ExtraPortMappings)) + out.KubeadmConfigPatchesJSON6902 = make([]PatchJSON6902, len(in.KubeadmConfigPatchesJSON6902)) + + for i := range in.ExtraMounts { + convertv1alpha4Mount(&in.ExtraMounts[i], &out.ExtraMounts[i]) + } + + for i := range in.ExtraPortMappings { + convertv1alpha4PortMapping(&in.ExtraPortMappings[i], &out.ExtraPortMappings[i]) + } + + for i := range in.KubeadmConfigPatchesJSON6902 { + convertv1alpha4PatchJSON6902(&in.KubeadmConfigPatchesJSON6902[i], &out.KubeadmConfigPatchesJSON6902[i]) + } +} + +func convertv1alpha4PatchJSON6902(in *v1alpha4.PatchJSON6902, out *PatchJSON6902) { + out.Group = in.Group + out.Version = in.Version + out.Kind = in.Kind + out.Patch = in.Patch +} + +func convertv1alpha4Networking(in *v1alpha4.Networking, out *Networking) { + out.IPFamily = ClusterIPFamily(in.IPFamily) + out.APIServerPort = in.APIServerPort + out.APIServerAddress = in.APIServerAddress + out.PodSubnet = in.PodSubnet + out.KubeProxyMode = ProxyMode(in.KubeProxyMode) + out.ServiceSubnet = in.ServiceSubnet + out.DisableDefaultCNI = in.DisableDefaultCNI + out.DNSSearch = in.DNSSearch +} + +func convertv1alpha4Mount(in *v1alpha4.Mount, out *Mount) { + out.ContainerPath = in.ContainerPath + out.HostPath = in.HostPath + out.Readonly = in.Readonly + out.SelinuxRelabel = in.SelinuxRelabel + out.Propagation = MountPropagation(in.Propagation) +} + +func convertv1alpha4PortMapping(in *v1alpha4.PortMapping, out *PortMapping) { + out.ContainerPort = in.ContainerPort + out.HostPort = in.HostPort + out.ListenAddress = in.ListenAddress + out.Protocol = PortMappingProtocol(in.Protocol) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/default.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/default.go new file mode 100644 index 000000000..7a93df802 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/default.go @@ -0,0 +1,104 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this comment makes golint ignore this file, feel free to edit the file. +// Code generated by not-actually-generated-but-go-away-golint. DO NOT EDIT. +// https://github.com/kubernetes/code-generator/issues/30 + +package config + +import ( + "sigs.k8s.io/kind/pkg/apis/config/defaults" + "sigs.k8s.io/kind/pkg/cluster/constants" +) + +// SetDefaultsCluster sets uninitialized fields to their default value. +func SetDefaultsCluster(obj *Cluster) { + // default cluster name + if obj.Name == "" { + obj.Name = constants.DefaultClusterName + } + + // default to a one node cluster + if len(obj.Nodes) == 0 { + obj.Nodes = []Node{ + { + Image: defaults.Image, + Role: ControlPlaneRole, + }, + } + } + + // default nodes + for i := range obj.Nodes { + a := &obj.Nodes[i] + SetDefaultsNode(a) + } + if obj.Networking.IPFamily == "" { + obj.Networking.IPFamily = IPv4Family + } + + // default to listening on 127.0.0.1:randomPort on ipv4 + // and [::1]:randomPort on ipv6 + if obj.Networking.APIServerAddress == "" { + obj.Networking.APIServerAddress = "127.0.0.1" + if obj.Networking.IPFamily == IPv6Family { + obj.Networking.APIServerAddress = "::1" + } + } + + // default the pod CIDR + if obj.Networking.PodSubnet == "" { + obj.Networking.PodSubnet = "10.244.0.0/16" + if obj.Networking.IPFamily == IPv6Family { + // node-mask cidr default is /64 so we need a larger subnet, we use /56 following best practices + // xref: https://www.ripe.net/publications/docs/ripe-690#4--size-of-end-user-prefix-assignment---48---56-or-something-else- + obj.Networking.PodSubnet = "fd00:10:244::/56" + } + if obj.Networking.IPFamily == DualStackFamily { + obj.Networking.PodSubnet = "10.244.0.0/16,fd00:10:244::/56" + } + } + + // default the service CIDR using the kubeadm default + // https://github.com/kubernetes/kubernetes/blob/746404f82a28e55e0b76ffa7e40306fb88eb3317/cmd/kubeadm/app/apis/kubeadm/v1beta2/defaults.go#L32 + // Note: kubeadm is using a /12 subnet, that may allocate a 2^20 bitmap in etcd + // we allocate a /16 subnet that allows 65535 services (current Kubernetes tested limit is O(10k) services) + if obj.Networking.ServiceSubnet == "" { + obj.Networking.ServiceSubnet = "10.96.0.0/16" + if obj.Networking.IPFamily == IPv6Family { + obj.Networking.ServiceSubnet = "fd00:10:96::/112" + } + if obj.Networking.IPFamily == DualStackFamily { + obj.Networking.ServiceSubnet = "10.96.0.0/16,fd00:10:96::/112" + } + } + // default the KubeProxyMode using iptables as it's already the default + if obj.Networking.KubeProxyMode == "" { + obj.Networking.KubeProxyMode = IPTablesProxyMode + } +} + +// SetDefaultsNode sets uninitialized fields to their default value. +func SetDefaultsNode(obj *Node) { + if obj.Image == "" { + obj.Image = defaults.Image + } + + if obj.Role == "" { + obj.Role = ControlPlaneRole + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/doc.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/doc.go new file mode 100644 index 000000000..520aac207 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config implements the current apiVersion of the `kind` Config +// along with some common abstractions +// +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=sigs.k8s.io/kind/pkg/internal/apis/config +// +k8s:defaulter-gen=TypeMeta +package config diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/convert.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/convert.go new file mode 100644 index 000000000..eded931b3 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/convert.go @@ -0,0 +1,29 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package encoding + +import ( + "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// V1Alpha4ToInternal converts to the internal API version +func V1Alpha4ToInternal(cluster *v1alpha4.Cluster) *config.Cluster { + v1alpha4.SetDefaultsCluster(cluster) + return config.Convertv1alpha4(cluster) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/doc.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/doc.go new file mode 100644 index 000000000..f6f90a9ef --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package encoding implements utilities for decoding from yaml the `kind` Config +package encoding diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/load.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/load.go new file mode 100644 index 000000000..718d5e46b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/load.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package encoding + +import ( + "bytes" + "os" + + yaml "gopkg.in/yaml.v3" + + "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + "sigs.k8s.io/kind/pkg/errors" + + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// Load reads the file at path and attempts to convert into a `kind` Config; the file +// can be one of the different API versions defined in scheme. +// If path == "" then the default config is returned +// If path == "-" then reads from stdin +func Load(path string) (*config.Cluster, error) { + // special case: empty path -> default config + // TODO(bentheelder): consider removing this + if path == "" { + out := &config.Cluster{} + config.SetDefaultsCluster(out) + return out, nil + } + + // read in file + raw, err := os.ReadFile(path) + if err != nil { + return nil, errors.Wrap(err, "error reading file") + } + + return Parse(raw) +} + +// Parse parses a cluster config from raw (yaml) bytes +// It will always return the current internal version after defaulting and +// conversion from the read version +func Parse(raw []byte) (*config.Cluster, error) { + // get kind & apiVersion + tm := typeMeta{} + if err := yaml.Unmarshal(raw, &tm); err != nil { + return nil, errors.Wrap(err, "could not determine kind / apiVersion for config") + } + + // decode specific (apiVersion, kind) + switch tm.APIVersion { + // handle v1alpha4 + case "kind.x-k8s.io/v1alpha4": + if tm.Kind != "Cluster" { + return nil, errors.Errorf("unknown kind %s for apiVersion: %s", tm.Kind, tm.APIVersion) + } + // load version + cfg := &v1alpha4.Cluster{} + if err := yamlUnmarshalStrict(raw, cfg); err != nil { + return nil, errors.Wrap(err, "unable to decode config") + } + // apply defaults for version and convert + return V1Alpha4ToInternal(cfg), nil + } + + // unknown apiVersion if we haven't already returned ... + return nil, errors.Errorf("unknown apiVersion: %s", tm.APIVersion) +} + +// basically metav1.TypeMeta, but with yaml tags +type typeMeta struct { + Kind string `yaml:"kind,omitempty"` + APIVersion string `yaml:"apiVersion,omitempty"` +} + +func yamlUnmarshalStrict(raw []byte, v interface{}) error { + d := yaml.NewDecoder(bytes.NewReader(raw)) + d.KnownFields(true) + return d.Decode(v) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go new file mode 100644 index 000000000..fed300079 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go @@ -0,0 +1,275 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package config
+
+/*
+NOTE: unlike the public types these should not have serialization tags and
+should stay 100% internal. These are used to pass around the processed public
+config for internal usage.
+*/
+
+// Cluster contains kind cluster configuration
+type Cluster struct {
+	// The cluster name.
+	// Optional, this will be overridden by --name / KIND_CLUSTER_NAME
+	Name string
+
+	// Nodes contains the list of nodes defined in the `kind` Cluster
+	// If unset this will default to a single control-plane node
+	// Note that if more than one control plane is specified, an external
+	// control plane load balancer will be provisioned implicitly
+	Nodes []Node
+
+	/* Advanced fields */
+
+	// Networking contains cluster wide network settings
+	Networking Networking
+
+	// FeatureGates contains a map of Kubernetes feature gates to whether they
+	// are enabled. The feature gates specified here are passed to all Kubernetes components as flags or in config.
+	//
+	// https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/
+	FeatureGates map[string]bool
+
+	// RuntimeConfig keys and values are translated into --runtime-config values for kube-apiserver, separated by commas.
+	//
+	// Use this to enable alpha APIs.
+	RuntimeConfig map[string]string
+
+	// KubeadmConfigPatches are applied to the generated kubeadm config as
+	// strategic merge patches to `kustomize build` internally
+	// https://github.com/kubernetes/community/blob/a9cf5c8f3380bb52ebe57b1e2dbdec136d8dd484/contributors/devel/sig-api-machinery/strategic-merge-patch.md
+	// This should be an inline yaml blob-string
+	KubeadmConfigPatches []string
+
+	// KubeadmConfigPatchesJSON6902 are applied to the generated kubeadm config
+	// as patchesJson6902 to `kustomize build`
+	KubeadmConfigPatchesJSON6902 []PatchJSON6902
+
+	// ContainerdConfigPatches are applied to every node's containerd config
+	// in the order listed.
+	// These should be toml strings to be applied as merge patches
+	ContainerdConfigPatches []string
+
+	// ContainerdConfigPatchesJSON6902 are applied to every node's containerd config
+	// in the order listed.
+	// These should be YAML- or JSON-formatted RFC 6902 JSON patches
+	ContainerdConfigPatchesJSON6902 []string
+}
+
+// Node contains settings for a node in the `kind` Cluster.
+// A node in kind config represents a container that will be provisioned with all the components
+// required for the assigned role in the Kubernetes cluster
+type Node struct {
+	// Role defines the role of the node in the Kubernetes cluster
+	// created by kind
+	//
+	// Defaults to "control-plane"
+	Role NodeRole
+
+	// Image is the node image to use when creating this node
+	// If unset a default image will be used, see defaults.Image
+	Image string
+
+	// Labels are the labels with which the respective node will be labeled
+	Labels map[string]string
+
+	/* Advanced fields */
+
+	// ExtraMounts describes additional mount points for the node container
+	// These may be used to bind a hostPath
+	ExtraMounts []Mount
+
+	// ExtraPortMappings describes additional port mappings for the node container
+	// bound to a host port
+	ExtraPortMappings []PortMapping
+
+	// KubeadmConfigPatches are applied to the generated kubeadm config as
+	// strategic merge patches to `kustomize build` internally
+	// https://github.com/kubernetes/community/blob/a9cf5c8f3380bb52ebe57b1e2dbdec136d8dd484/contributors/devel/sig-api-machinery/strategic-merge-patch.md
+	// This should be an inline yaml blob-string
+	KubeadmConfigPatches []string
+
+	// KubeadmConfigPatchesJSON6902 are applied to the generated kubeadm config
+	// as patchesJson6902 to `kustomize build`
+	KubeadmConfigPatchesJSON6902 []PatchJSON6902
+}
+
+// NodeRole defines the possible roles for nodes in a Kubernetes cluster managed by `kind`
+type NodeRole string
+
+const (
+	// ControlPlaneRole identifies a node that hosts a Kubernetes control-plane.
+	// NOTE: in single node clusters, control-plane nodes also act as worker
+	// nodes, in which case the taint will be removed. see:
+	// https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#control-plane-node-isolation
+	ControlPlaneRole NodeRole = "control-plane"
+	// WorkerRole identifies a node that hosts a Kubernetes worker
+	WorkerRole NodeRole = "worker"
+)
+
+// Networking contains cluster wide network settings
+type Networking struct {
+	// IPFamily is the cluster network model; it can be ipv4, ipv6, or dual
+	IPFamily ClusterIPFamily
+	// APIServerPort is the listen port on the host for the Kubernetes API Server
+	// Defaults to a random port on the host obtained by kind
+	//
+	// NOTE: if you set the special value of `-1` then the node backend
+	// (docker, podman...) will be left to pick the port instead.
+	// This is potentially useful for remote hosts, BUT it means when the container
+	// is restarted it will be randomized. Leave this unset to allow kind to pick it.
+	APIServerPort int32
+	// APIServerAddress is the listen address on the host for the Kubernetes
+	// API Server. This should be an IP address.
+	//
+	// Defaults to 127.0.0.1
+	APIServerAddress string
+	// PodSubnet is the CIDR used for pod IPs
+	// kind will select a default if unspecified
+	PodSubnet string
+	// ServiceSubnet is the CIDR used for service VIPs
+	// kind will select a default if unspecified
+	ServiceSubnet string
+	// If DisableDefaultCNI is true, kind will not install the default CNI setup.
+	// Instead the user should install their own CNI after creating the cluster.
+	DisableDefaultCNI bool
+	// KubeProxyMode defines whether kube-proxy should operate in iptables or ipvs mode
+	KubeProxyMode ProxyMode
+	// DNSSearch defines the DNS search domain to use for nodes. If not set, this will be inherited from the host.
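+	// A *[]string is presumably used so that unset (nil pointer: inherit
+	// from the host) can be distinguished from an explicitly empty search
+	// list.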
+ DNSSearch *[]string +} + +// ClusterIPFamily defines cluster network IP family +type ClusterIPFamily string + +const ( + // IPv4Family sets ClusterIPFamily to ipv4 + IPv4Family ClusterIPFamily = "ipv4" + // IPv6Family sets ClusterIPFamily to ipv6 + IPv6Family ClusterIPFamily = "ipv6" + // DualStackFamily sets ClusterIPFamily to dual + DualStackFamily ClusterIPFamily = "dual" +) + +// ProxyMode defines a proxy mode for kube-proxy +type ProxyMode string + +const ( + // IPTablesProxyMode sets ProxyMode to iptables + IPTablesProxyMode ProxyMode = "iptables" + // IPVSProxyMode sets ProxyMode to ipvs + IPVSProxyMode ProxyMode = "ipvs" + // NoneProxyMode disables kube-proxy + NoneProxyMode ProxyMode = "none" +) + +// PatchJSON6902 represents an inline kustomize json 6902 patch +// https://tools.ietf.org/html/rfc6902 +type PatchJSON6902 struct { + // these fields specify the patch target resource + Group string + Version string + Kind string + // Patch should contain the contents of the json patch as a string + Patch string +} + +// Mount specifies a host volume to mount into a container. +// This is a close copy of the upstream cri Mount type +// see: k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2 +// It additionally serializes the "propagation" field with the string enum +// names on disk as opposed to the int32 values, and the serialized field names +// have been made closer to core/v1 VolumeMount field names +// In yaml this looks like: +// +// containerPath: /foo +// hostPath: /bar +// readOnly: true +// selinuxRelabel: false +// propagation: None +// +// Propagation may be one of: None, HostToContainer, Bidirectional +type Mount struct { + // Path of the mount within the container. + ContainerPath string + // Path of the mount on the host. If the hostPath doesn't exist, then runtimes + // should report error. If the hostpath is a symbolic link, runtimes should + // follow the symlink and mount the real destination to container. + HostPath string + // If set, the mount is read-only. + Readonly bool + // If set, the mount needs SELinux relabeling. + SelinuxRelabel bool + // Requested propagation mode. + Propagation MountPropagation +} + +// PortMapping specifies a host port mapped into a container port. +// In yaml this looks like: +// +// containerPort: 80 +// hostPort: 8000 +// listenAddress: 127.0.0.1 +// protocol: TCP +type PortMapping struct { + // Port within the container. + ContainerPort int32 + // Port on the host. + // + // If unset, a random port will be selected. + // + // NOTE: if you set the special value of `-1` then the node backend + // (docker, podman...) will be left to pick the port instead. + // This is potentially useful for remote hosts, BUT it means when the container + // is restarted it will be randomized. Leave this unset to allow kind to pick it. + HostPort int32 + // TODO: add protocol (tcp/udp) and port-ranges + ListenAddress string + // Protocol (TCP/UDP/SCTP) + Protocol PortMappingProtocol +} + +// MountPropagation represents an "enum" for mount propagation options, +// see also Mount. +type MountPropagation string + +const ( + // MountPropagationNone specifies that no mount propagation + // ("private" in Linux terminology). + MountPropagationNone MountPropagation = "None" + // MountPropagationHostToContainer specifies that mounts get propagated + // from the host to the container ("rslave" in Linux). 
+ MountPropagationHostToContainer MountPropagation = "HostToContainer" + // MountPropagationBidirectional specifies that mounts get propagated from + // the host to the container and from the container to the host + // ("rshared" in Linux). + MountPropagationBidirectional MountPropagation = "Bidirectional" +) + +// PortMappingProtocol represents an "enum" for port mapping protocol options, +// see also PortMapping. +type PortMappingProtocol string + +const ( + // PortMappingProtocolTCP specifies TCP protocol + PortMappingProtocolTCP PortMappingProtocol = "TCP" + // PortMappingProtocolUDP specifies UDP protocol + PortMappingProtocolUDP PortMappingProtocol = "UDP" + // PortMappingProtocolSCTP specifies SCTP protocol + PortMappingProtocolSCTP PortMappingProtocol = "SCTP" +) diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go new file mode 100644 index 000000000..68185d157 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go @@ -0,0 +1,271 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "net" + "regexp" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/internal/sets" +) + +// similar to valid docker container names, but since we will prefix +// and suffix this name, we can relax it a little +// see NewContext() for usage +// https://godoc.org/github.com/docker/docker/daemon/names#pkg-constants +var validNameRE = regexp.MustCompile(`^[a-z0-9.-]+$`) + +// Validate returns a ConfigErrors with an entry for each problem +// with the config, or nil if there are none +func (c *Cluster) Validate() error { + errs := []error{} + + // validate the name + if !validNameRE.MatchString(c.Name) { + errs = append(errs, errors.Errorf("'%s' is not a valid cluster name, cluster names must match `%s`", + c.Name, validNameRE.String())) + } + + // the api server port only needs checking if we aren't picking a random one + // at runtime + if c.Networking.APIServerPort != 0 { + // validate api server listen port + if err := validatePort(c.Networking.APIServerPort); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid apiServerPort")) + } + } + + // podSubnet should be a valid CIDR + if err := validateSubnets(c.Networking.PodSubnet, c.Networking.IPFamily); err != nil { + errs = append(errs, errors.Errorf("invalid pod subnet %v", err)) + } + + // serviceSubnet should be a valid CIDR + if err := validateSubnets(c.Networking.ServiceSubnet, c.Networking.IPFamily); err != nil { + errs = append(errs, errors.Errorf("invalid service subnet %v", err)) + } + + // KubeProxyMode should be iptables or ipvs + if c.Networking.KubeProxyMode != IPTablesProxyMode && c.Networking.KubeProxyMode != IPVSProxyMode && + c.Networking.KubeProxyMode != NoneProxyMode { + errs = append(errs, errors.Errorf("invalid kubeProxyMode: %s", c.Networking.KubeProxyMode)) + } + + // validate nodes + numByRole := make(map[NodeRole]int32) + // All nodes 
in the config should be valid + for i, n := range c.Nodes { + // validate the node + if err := n.Validate(); err != nil { + errs = append(errs, errors.Errorf("invalid configuration for node %d: %v", i, err)) + } + // update role count + if num, ok := numByRole[n.Role]; ok { + numByRole[n.Role] = 1 + num + } else { + numByRole[n.Role] = 1 + } + } + + // there must be at least one control plane node + numControlPlane, anyControlPlane := numByRole[ControlPlaneRole] + if !anyControlPlane || numControlPlane < 1 { + errs = append(errs, errors.Errorf("must have at least one %s node", string(ControlPlaneRole))) + } + + if len(errs) > 0 { + return errors.NewAggregate(errs) + } + return nil +} + +// Validate returns a ConfigErrors with an entry for each problem +// with the Node, or nil if there are none +func (n *Node) Validate() error { + errs := []error{} + + // validate node role should be one of the expected values + switch n.Role { + case ControlPlaneRole, + WorkerRole: + default: + errs = append(errs, errors.Errorf("%q is not a valid node role", n.Role)) + } + + // image should be defined + if n.Image == "" { + errs = append(errs, errors.New("image is a required field")) + } + + // validate extra port forwards + for _, mapping := range n.ExtraPortMappings { + if err := validatePort(mapping.HostPort); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid hostPort")) + } + + if err := validatePort(mapping.ContainerPort); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid containerPort")) + } + } + + if err := validatePortMappings(n.ExtraPortMappings); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid portMapping")) + } + + if len(errs) > 0 { + return errors.NewAggregate(errs) + } + + return nil +} + +func validatePortMappings(portMappings []PortMapping) error { + errMsg := "port mapping with same listen address, port and protocol already configured" + + wildcardAddrIPv4 := net.ParseIP("0.0.0.0") + wildcardAddrIPv6 := net.ParseIP("::") + + // bindMap has the following key-value structure + // PORT/PROTOCOL: [ IP ] + // { 80/TCP: [ 127.0.0.1, 192.168.2.3 ], 80/UDP: [ 0.0.0.0 ] } + bindMap := make(map[string]sets.String) + + formatPortProtocol := func(port int32, protocol PortMappingProtocol) string { + return fmt.Sprintf("%d/%s", port, protocol) + } + + for _, portMapping := range portMappings { + addr := net.ParseIP(portMapping.ListenAddress) + addrString := addr.String() + + portProtocol := formatPortProtocol(portMapping.HostPort, portMapping.Protocol) + possibleErr := fmt.Errorf("%s: %s:%s", errMsg, addrString, portProtocol) + + // in golang 0.0.0.0 and [::] are equivalent, convert [::] -> 0.0.0.0 + // https://github.com/golang/go/issues/48723 + if addr.Equal(wildcardAddrIPv6) { + addr = wildcardAddrIPv4 + addrString = addr.String() + } + + if _, ok := bindMap[portProtocol]; ok { + + // wildcard address case: + // return error if there already exists any listen address for same port and protocol + if addr.Equal(wildcardAddrIPv4) { + if bindMap[portProtocol].Len() > 0 { + return possibleErr + } + } + + // direct duplicate & wild card present check: + // return error if same combination of ip, port and protocol already exists in bindMap. 
+ // return error if wildcard address is already present for same port & protocol + if bindMap[portProtocol].Has(addrString) || bindMap[portProtocol].Has(wildcardAddrIPv4.String()) { + return possibleErr + } + } else { + // initialize the set + bindMap[portProtocol] = sets.NewString() + } + + // add the entry to bindMap + bindMap[portProtocol].Insert(addrString) + } + return nil +} + +func validatePort(port int32) error { + // NOTE: -1 is a special value for auto-selecting the port in the container + // backend where possible as opposed to in kind itself. + if port < -1 || port > 65535 { + return errors.Errorf("invalid port number: %d", port) + } + return nil +} + +func validateSubnets(subnetStr string, ipFamily ClusterIPFamily) error { + allErrs := []error{} + + cidrsString := strings.Split(subnetStr, ",") + subnets := make([]*net.IPNet, 0, len(cidrsString)) + for _, cidrString := range cidrsString { + _, cidr, err := net.ParseCIDR(cidrString) + if err != nil { + return fmt.Errorf("failed to parse cidr value:%q with error: %v", cidrString, err) + } + subnets = append(subnets, cidr) + } + + dualstack := ipFamily == DualStackFamily + switch { + // if no subnets are defined + case len(subnets) == 0: + allErrs = append(allErrs, errors.New("no subnets defined")) + // if DualStack only 2 CIDRs allowed + case dualstack && len(subnets) > 2: + allErrs = append(allErrs, errors.New("expected one (IPv4 or IPv6) CIDR or two CIDRs from each family for dual-stack networking")) + // if DualStack and there are 2 CIDRs validate if there is at least one of each IP family + case dualstack && len(subnets) == 2: + areDualStackCIDRs, err := isDualStackCIDRs(subnets) + if err != nil { + allErrs = append(allErrs, err) + } else if !areDualStackCIDRs { + allErrs = append(allErrs, errors.New("expected one (IPv4 or IPv6) CIDR or two CIDRs from each family for dual-stack networking")) + } + // if not DualStack only one CIDR allowed + case !dualstack && len(subnets) > 1: + allErrs = append(allErrs, errors.New("only one CIDR allowed for single-stack networking")) + case ipFamily == IPv4Family && subnets[0].IP.To4() == nil: + allErrs = append(allErrs, errors.New("expected IPv4 CIDR for IPv4 family")) + case ipFamily == IPv6Family && subnets[0].IP.To4() != nil: + allErrs = append(allErrs, errors.New("expected IPv6 CIDR for IPv6 family")) + } + + if len(allErrs) > 0 { + return errors.NewAggregate(allErrs) + } + return nil +} + +// isDualStackCIDRs returns if +// - all are valid cidrs +// - at least one cidr from each family (v4 or v6) +func isDualStackCIDRs(cidrs []*net.IPNet) (bool, error) { + v4Found := false + v6Found := false + for _, cidr := range cidrs { + if cidr == nil { + return false, fmt.Errorf("cidr %v is invalid", cidr) + } + + if v4Found && v6Found { + continue + } + + if cidr.IP != nil && cidr.IP.To4() == nil { + v6Found = true + continue + } + v4Found = true + } + + return v4Found && v6Found, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/zz_generated.deepcopy.go new file mode 100644 index 000000000..6c86691fc --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/zz_generated.deepcopy.go @@ -0,0 +1,196 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Networking.DeepCopyInto(&out.Networking) + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RuntimeConfig != nil { + in, out := &in.RuntimeConfig, &out.RuntimeConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.KubeadmConfigPatches != nil { + in, out := &in.KubeadmConfigPatches, &out.KubeadmConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatchesJSON6902 != nil { + in, out := &in.KubeadmConfigPatchesJSON6902, &out.KubeadmConfigPatchesJSON6902 + *out = make([]PatchJSON6902, len(*in)) + copy(*out, *in) + } + if in.ContainerdConfigPatches != nil { + in, out := &in.ContainerdConfigPatches, &out.ContainerdConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ContainerdConfigPatchesJSON6902 != nil { + in, out := &in.ContainerdConfigPatchesJSON6902, &out.ContainerdConfigPatchesJSON6902 + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mount) DeepCopyInto(out *Mount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount. +func (in *Mount) DeepCopy() *Mount { + if in == nil { + return nil + } + out := new(Mount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in + if in.DNSSearch != nil { + in, out := &in.DNSSearch, &out.DNSSearch + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. +func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Node) DeepCopyInto(out *Node) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = make([]Mount, len(*in)) + copy(*out, *in) + } + if in.ExtraPortMappings != nil { + in, out := &in.ExtraPortMappings, &out.ExtraPortMappings + *out = make([]PortMapping, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatches != nil { + in, out := &in.KubeadmConfigPatches, &out.KubeadmConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatchesJSON6902 != nil { + in, out := &in.KubeadmConfigPatchesJSON6902, &out.KubeadmConfigPatchesJSON6902 + *out = make([]PatchJSON6902, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchJSON6902) DeepCopyInto(out *PatchJSON6902) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchJSON6902. +func (in *PatchJSON6902) DeepCopy() *PatchJSON6902 { + if in == nil { + return nil + } + out := new(PatchJSON6902) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortMapping) DeepCopyInto(out *PortMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMapping. +func (in *PortMapping) DeepCopy() *PortMapping { + if in == nil { + return nil + } + out := new(PortMapping) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/cli/logger.go b/vendor/sigs.k8s.io/kind/pkg/internal/cli/logger.go new file mode 100644 index 000000000..73eff7a1d --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/cli/logger.go @@ -0,0 +1,255 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cli + +import ( + "bytes" + "fmt" + "io" + "runtime" + "strings" + "sync" + "sync/atomic" + + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/internal/env" +) + +// Logger is the kind cli's log.Logger implementation +type Logger struct { + writer io.Writer + writerMu sync.Mutex + verbosity log.Level + bufferPool *bufferPool + // kind special additions + isSmartWriter bool +} + +var _ log.Logger = &Logger{} + +// NewLogger returns a new Logger with the given verbosity +func NewLogger(writer io.Writer, verbosity log.Level) *Logger { + l := &Logger{ + verbosity: verbosity, + bufferPool: newBufferPool(), + } + l.SetWriter(writer) + return l +} + +// SetWriter sets the output writer +func (l *Logger) SetWriter(w io.Writer) { + l.writerMu.Lock() + defer l.writerMu.Unlock() + l.writer = w + _, isSpinner := w.(*Spinner) + l.isSmartWriter = isSpinner || env.IsSmartTerminal(w) +} + +// ColorEnabled returns true if the caller is OK to write colored output +func (l *Logger) ColorEnabled() bool { + l.writerMu.Lock() + defer l.writerMu.Unlock() + return l.isSmartWriter +} + +func (l *Logger) getVerbosity() log.Level { + return log.Level(atomic.LoadInt32((*int32)(&l.verbosity))) +} + +// SetVerbosity sets the loggers verbosity +func (l *Logger) SetVerbosity(verbosity log.Level) { + atomic.StoreInt32((*int32)(&l.verbosity), int32(verbosity)) +} + +// synchronized write to the inner writer +func (l *Logger) write(p []byte) (n int, err error) { + l.writerMu.Lock() + defer l.writerMu.Unlock() + return l.writer.Write(p) +} + +// writeBuffer writes buf with write, ensuring there is a trailing newline +func (l *Logger) writeBuffer(buf *bytes.Buffer) { + // ensure trailing newline + if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + // TODO: should we handle this somehow?? + // Who logs for the logger? 🤔 + _, _ = l.write(buf.Bytes()) +} + +// print writes a simple string to the log writer +func (l *Logger) print(message string) { + buf := bytes.NewBufferString(message) + l.writeBuffer(buf) +} + +// printf is roughly fmt.Fprintf against the log writer +func (l *Logger) printf(format string, args ...interface{}) { + buf := l.bufferPool.Get() + fmt.Fprintf(buf, format, args...) + l.writeBuffer(buf) + l.bufferPool.Put(buf) +} + +// addDebugHeader inserts the debug line header to buf +func addDebugHeader(buf *bytes.Buffer) { + _, file, line, ok := runtime.Caller(3) + // lifted from klog + if !ok { + file = "???" + line = 1 + } else { + if slash := strings.LastIndex(file, "/"); slash >= 0 { + path := file + file = path[slash+1:] + if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { + file = path[dirsep+1:] + } + } + } + buf.Grow(len(file) + 11) // we know at least this many bytes are needed + buf.WriteString("DEBUG: ") + buf.WriteString(file) + buf.WriteByte(':') + fmt.Fprintf(buf, "%d", line) + buf.WriteByte(']') + buf.WriteByte(' ') +} + +// debug is like print but with a debug log header +func (l *Logger) debug(message string) { + buf := l.bufferPool.Get() + addDebugHeader(buf) + buf.WriteString(message) + l.writeBuffer(buf) + l.bufferPool.Put(buf) +} + +// debugf is like printf but with a debug log header +func (l *Logger) debugf(format string, args ...interface{}) { + buf := l.bufferPool.Get() + addDebugHeader(buf) + fmt.Fprintf(buf, format, args...) 
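+ // flush the composed line, then recycle the buffer below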
+ l.writeBuffer(buf) + l.bufferPool.Put(buf) +} + +// Warn is part of the log.Logger interface +func (l *Logger) Warn(message string) { + l.print(message) +} + +// Warnf is part of the log.Logger interface +func (l *Logger) Warnf(format string, args ...interface{}) { + l.printf(format, args...) +} + +// Error is part of the log.Logger interface +func (l *Logger) Error(message string) { + l.print(message) +} + +// Errorf is part of the log.Logger interface +func (l *Logger) Errorf(format string, args ...interface{}) { + l.printf(format, args...) +} + +// V is part of the log.Logger interface +func (l *Logger) V(level log.Level) log.InfoLogger { + return infoLogger{ + logger: l, + level: level, + enabled: level <= l.getVerbosity(), + } +} + +// infoLogger implements log.InfoLogger for Logger +type infoLogger struct { + logger *Logger + level log.Level + enabled bool +} + +// Enabled is part of the log.InfoLogger interface +func (i infoLogger) Enabled() bool { + return i.enabled +} + +// Info is part of the log.InfoLogger interface +func (i infoLogger) Info(message string) { + if !i.enabled { + return + } + // for > 0, we are writing debug messages, include extra info + if i.level > 0 { + i.logger.debug(message) + } else { + i.logger.print(message) + } +} + +// Infof is part of the log.InfoLogger interface +func (i infoLogger) Infof(format string, args ...interface{}) { + if !i.enabled { + return + } + // for > 0, we are writing debug messages, include extra info + if i.level > 0 { + i.logger.debugf(format, args...) + } else { + i.logger.printf(format, args...) + } +} + +// bufferPool is a type safe sync.Pool of *byte.Buffer, guaranteed to be Reset +type bufferPool struct { + sync.Pool +} + +// newBufferPool returns a new bufferPool +func newBufferPool() *bufferPool { + return &bufferPool{ + sync.Pool{ + New: func() interface{} { + // The Pool's New function should generally only return pointer + // types, since a pointer can be put into the return interface + // value without an allocation: + return new(bytes.Buffer) + }, + }, + } +} + +// Get obtains a buffer from the pool +func (b *bufferPool) Get() *bytes.Buffer { + return b.Pool.Get().(*bytes.Buffer) +} + +// Put returns a buffer to the pool, resetting it first +func (b *bufferPool) Put(x *bytes.Buffer) { + // only store small buffers to avoid pointless allocation + // avoid keeping arbitrarily large buffers + if x.Len() > 256 { + return + } + x.Reset() + b.Pool.Put(x) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/cli/override.go b/vendor/sigs.k8s.io/kind/pkg/internal/cli/override.go new file mode 100644 index 000000000..695106522 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/cli/override.go @@ -0,0 +1,18 @@ +package cli + +import ( + "os" + + "github.com/spf13/pflag" +) + +// OverrideDefaultName conditionally allows overriding the default cluster name +// by setting the KIND_CLUSTER_NAME environment variable +// only if --name wasn't set explicitly +func OverrideDefaultName(fs *pflag.FlagSet) { + if !fs.Changed("name") { + if name := os.Getenv("KIND_CLUSTER_NAME"); name != "" { + _ = fs.Set("name", name) + } + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/cli/spinner.go b/vendor/sigs.k8s.io/kind/pkg/internal/cli/spinner.go new file mode 100644 index 000000000..2af86a250 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/cli/spinner.go @@ -0,0 +1,168 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "fmt" + "io" + "runtime" + "sync" + "time" +) + +// custom CLI loading spinner for kind +var spinnerFrames = []string{ + "⠈⠁", + "⠈⠑", + "⠈⠱", + "⠈⡱", + "⢀⡱", + "⢄⡱", + "⢄⡱", + "⢆⡱", + "⢎⡱", + "⢎⡰", + "⢎⡠", + "⢎⡀", + "⢎⠁", + "⠎⠁", + "⠊⠁", +} + +// Spinner is a simple and efficient CLI loading spinner used by kind +// It is simplistic and assumes that the line length will not change. +type Spinner struct { + stop chan struct{} // signals writer goroutine to stop from Stop() + stopped chan struct{} // signals Stop() that the writer goroutine stopped + mu *sync.Mutex // protects the mutable bits + // below are protected by mu + running bool + writer io.Writer + ticker *time.Ticker // signals that it is time to write a frame + prefix string + suffix string + // format string used to write a frame, depends on the host OS / terminal + frameFormat string +} + +// spinner implements writer +var _ io.Writer = &Spinner{} + +// NewSpinner initializes and returns a new Spinner that will write to w +// NOTE: w should be os.Stderr or similar, and it should be a Terminal +func NewSpinner(w io.Writer) *Spinner { + frameFormat := "\x1b[?7l\r%s%s%s\x1b[?7h" + // toggling wrapping seems to behave poorly on windows + // in general only the simplest escape codes behave well at the moment, + // and only in newer shells + if runtime.GOOS == "windows" { + frameFormat = "\r%s%s%s" + } + return &Spinner{ + stop: make(chan struct{}, 1), + stopped: make(chan struct{}), + mu: &sync.Mutex{}, + writer: w, + frameFormat: frameFormat, + } +} + +// SetPrefix sets the prefix to print before the spinner +func (s *Spinner) SetPrefix(prefix string) { + s.mu.Lock() + defer s.mu.Unlock() + s.prefix = prefix +} + +// SetSuffix sets the suffix to print after the spinner +func (s *Spinner) SetSuffix(suffix string) { + s.mu.Lock() + defer s.mu.Unlock() + s.suffix = suffix +} + +// Start starts the spinner running +func (s *Spinner) Start() { + s.mu.Lock() + defer s.mu.Unlock() + // don't start if we've already started + if s.running { + return + } + // flag that we've started + s.running = true + // start / create a frame ticker + s.ticker = time.NewTicker(time.Millisecond * 100) + // spin in the background + go func() { + // write frames forever (until signaled to stop) + for { + for _, frame := range spinnerFrames { + select { + // prefer stopping, select this signal first + case <-s.stop: + func() { + s.mu.Lock() + defer s.mu.Unlock() + s.ticker.Stop() // free up the ticker + s.running = false // mark as stopped (it's fine to start now) + s.stopped <- struct{}{} // tell Stop() that we're done + }() + return // ... 
and stop
+ // otherwise continue and write one frame
+ case <-s.ticker.C:
+ func() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ fmt.Fprintf(s.writer, s.frameFormat, s.prefix, frame, s.suffix)
+ }()
+ }
+ }
+ }
+ }()
+}
+
+// Stop signals the spinner to stop
+func (s *Spinner) Stop() {
+ s.mu.Lock()
+ if !s.running {
+ s.mu.Unlock()
+ return
+ }
+ // try to stop, do nothing if channel is full (i.e. already busy stopping)
+ s.stop <- struct{}{}
+ s.mu.Unlock()
+ // wait for stop to be finished
+ <-s.stopped
+}
+
+// Write implements io.Writer, interrupting the spinner and writing to
+// the inner writer
+func (s *Spinner) Write(p []byte) (n int, err error) {
+ // lock first, so nothing else can start writing until we are done
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ // if the spinner is not running, just write directly
+ if !s.running {
+ return s.writer.Write(p)
+ }
+ // otherwise: we will rewrite the line first
+ if _, err := s.writer.Write([]byte("\r")); err != nil {
+ return 0, err
+ }
+ return s.writer.Write(p)
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/cli/status.go b/vendor/sigs.k8s.io/kind/pkg/internal/cli/status.go
new file mode 100644
index 000000000..d7ef7eb7a
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/internal/cli/status.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package cli + +import ( + "fmt" + + "sigs.k8s.io/kind/pkg/log" +) + +// Status is used to track ongoing status in a CLI, with a nice loading spinner +// when attached to a terminal +type Status struct { + spinner *Spinner + status string + logger log.Logger + // for controlling coloring etc + successFormat string + failureFormat string +} + +// StatusForLogger returns a new status object for the logger l, +// if l is the kind cli logger and the writer is a Spinner, that spinner +// will be used for the status +func StatusForLogger(l log.Logger) *Status { + s := &Status{ + logger: l, + successFormat: " ✓ %s\n", + failureFormat: " ✗ %s\n", + } + // if we're using the CLI logger, check for if it has a spinner setup + // and wire the status to that + if v, ok := l.(*Logger); ok { + if v2, ok := v.writer.(*Spinner); ok { + s.spinner = v2 + // use colored success / failure messages + s.successFormat = " \x1b[32m✓\x1b[0m %s\n" + s.failureFormat = " \x1b[31m✗\x1b[0m %s\n" + } + } + return s +} + +// Start starts a new phase of the status, if attached to a terminal +// there will be a loading spinner with this status +func (s *Status) Start(status string) { + s.End(true) + // set new status + s.status = status + if s.spinner != nil { + s.spinner.SetSuffix(fmt.Sprintf(" %s ", s.status)) + s.spinner.Start() + } else { + s.logger.V(0).Infof(" • %s ...\n", s.status) + } +} + +// End completes the current status, ending any previous spinning and +// marking the status as success or failure +func (s *Status) End(success bool) { + if s.status == "" { + return + } + + if s.spinner != nil { + s.spinner.Stop() + fmt.Fprint(s.spinner.writer, "\r") + } + if success { + s.logger.V(0).Infof(s.successFormat, s.status) + } else { + s.logger.V(0).Infof(s.failureFormat, s.status) + } + + s.status = "" +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/env/term.go b/vendor/sigs.k8s.io/kind/pkg/internal/env/term.go new file mode 100644 index 000000000..5f809dd9f --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/env/term.go @@ -0,0 +1,107 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package env + +import ( + "io" + "os" + "runtime" + + isatty "github.com/mattn/go-isatty" +) + +// a fake TTY type for testing that can only be implemented within this package +type isTestFakeTTY interface { + isTestFakeTTY() +} + +// IsTerminal returns true if the writer w is a terminal +func IsTerminal(w io.Writer) bool { + // check for internal fake type we can use for testing. + if _, ok := (w).(isTestFakeTTY); ok { + return true + } + // check for real terminals + if v, ok := (w).(*os.File); ok { + return isatty.IsTerminal(v.Fd()) + } + return false +} + +// IsSmartTerminal returns true if the writer w is a terminal AND +// we think that the terminal is smart enough to use VT escape codes etc. 
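+// (for example, TERM=dumb or a set NO_COLOR environment variable means not smart)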
+func IsSmartTerminal(w io.Writer) bool { + return isSmartTerminal(w, runtime.GOOS, os.LookupEnv) +} + +func isSmartTerminal(w io.Writer, GOOS string, lookupEnv func(string) (string, bool)) bool { + // Not smart if it's not a tty + if !IsTerminal(w) { + return false + } + + // getenv helper for when we only care about the value + getenv := func(e string) string { + v, _ := lookupEnv(e) + return v + } + + // Explicit request for no ANSI escape codes + // https://no-color.org/ + if _, set := lookupEnv("NO_COLOR"); set { + return false + } + + // Explicitly dumb terminals are not smart + // https://en.wikipedia.org/wiki/Computer_terminal#Dumb_terminals + term := getenv("TERM") + if term == "dumb" { + return false + } + // st has some bug 🤷‍♂️ + // https://github.com/kubernetes-sigs/kind/issues/1892 + if term == "st-256color" { + return false + } + + // On Windows WT_SESSION is set by the modern terminal component. + // Older terminals have poor support for UTF-8, VT escape codes, etc. + if GOOS == "windows" && getenv("WT_SESSION") == "" { + return false + } + + /* CI Systems with bad Fake TTYs */ + // Travis CI + // https://github.com/kubernetes-sigs/kind/issues/1478 + // We can detect it with documented magical environment variables + // https://docs.travis-ci.com/user/environment-variables/#default-environment-variables + if getenv("HAS_JOSH_K_SEAL_OF_APPROVAL") == "true" && getenv("TRAVIS") == "true" { + return false + } + + // OK, we'll assume it's smart now, given no evidence otherwise. + return true +} + +// trivial fake TTY writer for testing +type testFakeTTY struct{} + +func (t *testFakeTTY) Write(p []byte) (int, error) { + return len(p), nil +} + +func (t *testFakeTTY) isTestFakeTTY() {} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/doc.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/doc.go new file mode 100644 index 000000000..d24d9ce39 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/patch/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package patch contains helpers for applying patches +package patch diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/json6902patch.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/json6902patch.go new file mode 100644 index 000000000..e140c0e4b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/patch/json6902patch.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package patch
+
+import (
+ jsonpatch "github.com/evanphx/json-patch/v5"
+
+ "sigs.k8s.io/yaml"
+
+ "sigs.k8s.io/kind/pkg/errors"
+
+ "sigs.k8s.io/kind/pkg/internal/apis/config"
+)
+
+type json6902Patch struct {
+ raw string // raw original contents
+ patch jsonpatch.Patch // processed JSON 6902 patch
+ matchInfo matchInfo // used to match resources
+}
+
+func convertJSON6902Patches(patchesJSON6902 []config.PatchJSON6902) ([]json6902Patch, error) {
+ patches := []json6902Patch{}
+ for _, configPatch := range patchesJSON6902 {
+ patchJSON, err := yaml.YAMLToJSON([]byte(configPatch.Patch))
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+ patch, err := jsonpatch.DecodePatch(patchJSON)
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+ patches = append(patches, json6902Patch{
+ raw: configPatch.Patch,
+ patch: patch,
+ matchInfo: matchInfoForConfigJSON6902Patch(configPatch),
+ })
+ }
+ return patches, nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/kubeyaml.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/kubeyaml.go
new file mode 100644
index 000000000..9bdd1997f
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/internal/patch/kubeyaml.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package patch
+
+import (
+ "strings"
+
+ "sigs.k8s.io/kind/pkg/errors"
+
+ "sigs.k8s.io/kind/pkg/internal/apis/config"
+)
+
+// KubeYAML takes a Kubernetes object YAML document stream to patch,
+// merge patches, and JSON 6902 patches.
+//
+// It returns the patched YAML document stream.
+//
+// Matching is performed on Kubernetes style v1 TypeMeta fields
+// (kind and apiVersion), between the YAML documents and the patches.
+//
+// Patches match a document if their kind and apiVersion match it, with the
+// exception that patches which do not set apiVersion skip the apiVersion check.
+func KubeYAML(toPatch string, patches []string, patches6902 []config.PatchJSON6902) (string, error) {
+ // pre-process, including splitting up documents etc.
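+ // (each resulting document is then matched against the patches by kind/apiVersion)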
+ resources, err := parseResources(toPatch) + if err != nil { + return "", errors.Wrap(err, "failed to parse yaml to patch") + } + mergePatches, err := parseMergePatches(patches) + if err != nil { + return "", errors.Wrap(err, "failed to parse patches") + } + json6902patches, err := convertJSON6902Patches(patches6902) + if err != nil { + return "", errors.Wrap(err, "failed to parse JSON 6902 patches") + } + // apply patches and build result + builder := &strings.Builder{} + for i, r := range resources { + // apply merge patches + for _, p := range mergePatches { + if _, err := r.applyMergePatch(p); err != nil { + return "", errors.Wrap(err, "failed to apply patch") + } + } + // apply RFC 6902 JSON patches + for _, p := range json6902patches { + if _, err := r.apply6902Patch(p); err != nil { + return "", errors.Wrap(err, "failed to apply JSON 6902 patch") + } + } + // write out result + if err := r.encodeTo(builder); err != nil { + return "", errors.Wrap(err, "failed to write patched resource") + } + // write document separator + if i+1 < len(resources) { + if _, err := builder.WriteString("---\n"); err != nil { + return "", errors.Wrap(err, "failed to write document separator") + } + } + } + // verify that all patches were used + return builder.String(), nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/matchinfo.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/matchinfo.go new file mode 100644 index 000000000..7674ea5fb --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/patch/matchinfo.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patch + +import ( + "sigs.k8s.io/yaml" + + "sigs.k8s.io/kind/pkg/errors" + + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// we match resources and patches on their v1 TypeMeta +type matchInfo struct { + Kind string `json:"kind,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` +} + +func parseYAMLMatchInfo(raw string) (matchInfo, error) { + m := matchInfo{} + if err := yaml.Unmarshal([]byte(raw), &m); err != nil { + return matchInfo{}, errors.Wrapf(err, "failed to parse type meta for %q", raw) + } + return m, nil +} + +func matchInfoForConfigJSON6902Patch(patch config.PatchJSON6902) matchInfo { + return matchInfo{ + Kind: patch.Kind, + APIVersion: groupVersionToAPIVersion(patch.Group, patch.Version), + } +} + +func groupVersionToAPIVersion(group, version string) string { + if group == "" { + return version + } + return group + "/" + version +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/mergepatch.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/mergepatch.go new file mode 100644 index 000000000..c52b9f55e --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/patch/mergepatch.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patch + +import ( + "sigs.k8s.io/yaml" + + "sigs.k8s.io/kind/pkg/errors" +) + +type mergePatch struct { + raw string // the original raw data + json []byte // the processed data (in JSON form) + matchInfo matchInfo // for matching resources +} + +func parseMergePatches(rawPatches []string) ([]mergePatch, error) { + patches := []mergePatch{} + // split document streams before trying to parse them + splitRawPatches := make([]string, 0, len(rawPatches)) + for _, raw := range rawPatches { + splitRaw, err := splitYAMLDocuments(raw) + if err != nil { + return nil, err + } + splitRawPatches = append(splitRawPatches, splitRaw...) + } + for _, raw := range splitRawPatches { + matchInfo, err := parseYAMLMatchInfo(raw) + if err != nil { + return nil, errors.WithStack(err) + } + json, err := yaml.YAMLToJSON([]byte(raw)) + if err != nil { + return nil, errors.WithStack(err) + } + patches = append(patches, mergePatch{ + raw: raw, + json: json, + matchInfo: matchInfo, + }) + } + return patches, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/resource.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/resource.go new file mode 100644 index 000000000..17f898ec9 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/patch/resource.go @@ -0,0 +1,148 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package patch + +import ( + "bufio" + "bytes" + "io" + "strings" + + jsonpatch "github.com/evanphx/json-patch/v5" + + "sigs.k8s.io/yaml" + + "sigs.k8s.io/kind/pkg/errors" +) + +type resource struct { + raw string // the original raw data + json []byte // the processed data (in JSON form), may be mutated + matchInfo matchInfo // for matching patches +} + +func (r *resource) apply6902Patch(patch json6902Patch) (matches bool, err error) { + if !r.matches(patch.matchInfo) { + return false, nil + } + patched, err := patch.patch.Apply(r.json) + if err != nil { + return true, errors.WithStack(err) + } + r.json = patched + return true, nil +} + +func (r *resource) applyMergePatch(patch mergePatch) (matches bool, err error) { + if !r.matches(patch.matchInfo) { + return false, nil + } + patched, err := jsonpatch.MergePatch(r.json, patch.json) + if err != nil { + return true, errors.WithStack(err) + } + r.json = patched + return true, nil +} + +func (r resource) matches(o matchInfo) bool { + m := &r.matchInfo + // we require kind to match, but if the patch does not specify + // APIVersion we ignore it (eg to allow trivial patches across kubeadm versions) + return m.Kind == o.Kind && (o.APIVersion == "" || m.APIVersion == o.APIVersion) +} + +func (r *resource) encodeTo(w io.Writer) error { + encoded, err := yaml.JSONToYAML(r.json) + if err != nil { + return errors.WithStack(err) + } + if _, err := w.Write(encoded); err != nil { + return errors.WithStack(err) + } + return nil +} + +func parseResources(yamlDocumentStream string) ([]resource, error) { + resources := []resource{} + documents, err := splitYAMLDocuments(yamlDocumentStream) + if err != nil { + return nil, err + } + for _, raw := range documents { + matchInfo, err := parseYAMLMatchInfo(raw) + if err != nil { + return nil, errors.WithStack(err) + } + json, err := yaml.YAMLToJSON([]byte(raw)) + if err != nil { + return nil, errors.WithStack(err) + } + resources = append(resources, resource{ + raw: raw, + json: json, + matchInfo: matchInfo, + }) + } + return resources, nil +} + +func splitYAMLDocuments(yamlDocumentStream string) ([]string, error) { + documents := []string{} + scanner := bufio.NewScanner(strings.NewReader(yamlDocumentStream)) + scanner.Split(splitYAMLDocument) + for scanner.Scan() { + documents = append(documents, scanner.Text()) + } + if err := scanner.Err(); err != nil { + return nil, errors.Wrap(err, "error splitting documents") + } + return documents, nil +} + +const yamlSeparator = "\n---" + +// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents. +// this is borrowed from k8s.io/apimachinery/pkg/util/yaml/decoder.go +func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + sep := len([]byte(yamlSeparator)) + if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 { + // We have a potential document terminator + i += sep + after := data[i:] + if len(after) == 0 { + // we can't read any more characters + if atEOF { + return len(data), data[:len(data)-sep], nil + } + return 0, nil, nil + } + if j := bytes.IndexByte(after, '\n'); j >= 0 { + return i + j + 1, data[0 : i-sep], nil + } + return 0, nil, nil + } + // If we're at EOF, we have a final, non-terminated line. Return it. + if atEOF { + return len(data), data, nil + } + // Request more data. 
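+ // (returning 0, nil, nil asks bufio.Scanner to call us again with more data)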
+ return 0, nil, nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/toml.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/toml.go
new file mode 100644
index 000000000..5923977a6
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/internal/patch/toml.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package patch
+
+import (
+ "bytes"
+ "encoding/json"
+
+ burntoml "github.com/BurntSushi/toml"
+ jsonpatch "github.com/evanphx/json-patch/v5"
+ toml "github.com/pelletier/go-toml"
+ yaml "gopkg.in/yaml.v3"
+
+ "sigs.k8s.io/kind/pkg/errors"
+)
+
+// TOML patches toPatch with the patches (should be TOML merge patches) and patches6902 (should be JSON 6902 patches)
+func TOML(toPatch string, patches []string, patches6902 []string) (string, error) {
+ // convert to JSON for patching
+ j, err := tomlToJSON([]byte(toPatch))
+ if err != nil {
+ return "", err
+ }
+ // apply merge patches
+ for _, patch := range patches {
+ pj, err := tomlToJSON([]byte(patch))
+ if err != nil {
+ return "", err
+ }
+ patched, err := jsonpatch.MergePatch(j, pj)
+ if err != nil {
+ return "", errors.WithStack(err)
+ }
+ j = patched
+ }
+ // apply JSON 6902 patches
+ for _, patch6902 := range patches6902 {
+ patch, err := jsonpatch.DecodePatch([]byte(patch6902))
+ if err != nil {
+ return "", errors.WithStack(err)
+ }
+ patched, err := patch.Apply(j)
+ if err != nil {
+ return "", errors.WithStack(err)
+ }
+ j = patched
+ }
+ // convert result back to TOML
+ return jsonToTOMLString(j)
+}
+
+// tomlToJSON converts arbitrary TOML to JSON
+func tomlToJSON(t []byte) ([]byte, error) {
+ // we use github.com/pelletier/go-toml here to unmarshal arbitrary TOML to JSON
+ tree, err := toml.LoadBytes(t)
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+ b, err := json.Marshal(tree.ToMap())
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+ return b, nil
+}
+
+// jsonToTOMLString converts arbitrary JSON to TOML
+func jsonToTOMLString(j []byte) (string, error) {
+ var unstruct interface{}
+ // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+ // Go JSON library doesn't try to pick the right number type (int, float,
+ // etc.) when unmarshalling to interface{}, it just picks float64
+ // universally. go-yaml does go through the effort of picking the right
+ // number type, so we can preserve number type throughout this process.
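+ // (e.g. an integer in the input TOML stays 42 rather than becoming 42.0)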
+ if err := yaml.Unmarshal(j, &unstruct); err != nil {
+ return "", errors.WithStack(err)
+ }
+ // we use github.com/BurntSushi/toml here because github.com/pelletier/go-toml
+ // can only marshal structs AND BurntSushi/toml is what containerd uses
+ // and has more canonically formatted output (we initially plan to use
+ // this package for patching containerd config)
+ var buff bytes.Buffer
+ if err := burntoml.NewEncoder(&buff).Encode(unstruct); err != nil {
+ return "", errors.WithStack(err)
+ }
+ return buff.String(), nil
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/sets/doc.go b/vendor/sigs.k8s.io/kind/pkg/internal/sets/doc.go
new file mode 100644
index 000000000..70f70a926
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/internal/sets/doc.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package sets implements set types.
+//
+// This is forked from k8s.io/apimachinery/pkg/util/sets (under the same project
+// and license), because k8s.io/apimachinery is a relatively heavy dependency
+// and we only need some trivial utilities. Avoiding importing k8s.io/apimachinery
+// makes kind easier to embed in other projects for testing etc.
+//
+// The set implementation is relatively small and very stable.
+package sets
diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/sets/empty.go b/vendor/sigs.k8s.io/kind/pkg/internal/sets/empty.go
new file mode 100644
index 000000000..e11e622c5
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/internal/sets/empty.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+package sets
+
+// Empty is public since it is used by some internal API objects for conversions between external
+// string arrays and internal sets, and conversion logic requires public types today.
+type Empty struct{}
diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/sets/string.go b/vendor/sigs.k8s.io/kind/pkg/internal/sets/string.go
new file mode 100644
index 000000000..e6f37db88
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/internal/sets/string.go
@@ -0,0 +1,205 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +type String map[string]Empty + +// NewString creates a String from a list of values. +func NewString(items ...string) String { + ss := String{} + ss.Insert(items...) + return ss +} + +// StringKeySet creates a String from a keys of a map[string](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func StringKeySet(theMap interface{}) String { + v := reflect.ValueOf(theMap) + ret := String{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(string)) + } + return ret +} + +// Insert adds items to the set. +func (s String) Insert(items ...string) String { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s String) Delete(items ...string) String { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s String) Has(item string) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s String) HasAll(items ...string) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s String) HasAny(items ...string) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s String) Difference(s2 String) String { + result := NewString() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 String) Union(s2 String) String { + result := NewString() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 String) Intersection(s2 String) String { + var walk, other String + result := NewString() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 String) IsSuperset(s2 String) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. 
+// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 String) Equal(s2 String) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfString []string + +func (s sortableSliceOfString) Len() int { return len(s) } +func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } +func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted string slice. +func (s String) List() []string { + res := make(sortableSliceOfString, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []string(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s String) UnsortedList() []string { + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s String) PopAny() (string, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue string + return zeroValue, false +} + +// Len returns the size of the set. +func (s String) Len() int { + return len(s) +} + +func lessString(lhs, rhs string) bool { + return lhs < rhs +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/version/doc.go b/vendor/sigs.k8s.io/kind/pkg/internal/version/doc.go new file mode 100644 index 000000000..0bb753f51 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/version/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version provides utilities for version number comparisons +// +// This is forked from k8s.io/apimachinery/pkg/util/version to make +// kind easier to import (k8s.io/apimachinery/pkg/util/version is a stable, +// mature package with no externaldependencies within a large, heavy module) +package version diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/version/version.go b/vendor/sigs.k8s.io/kind/pkg/internal/version/version.go new file mode 100644 index 000000000..8c997ec45 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/version/version.go @@ -0,0 +1,325 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package version + +import ( + "bytes" + "fmt" + "regexp" + "strconv" + "strings" +) + +// Version is an opaque representation of a version number +type Version struct { + components []uint + semver bool + preRelease string + buildMetadata string +} + +var ( + // versionMatchRE splits a version string into numeric and "extra" parts + versionMatchRE = regexp.MustCompile(`^\s*v?([0-9]+(?:\.[0-9]+)*)(.*)*$`) + // extraMatchRE splits the "extra" part of versionMatchRE into semver pre-release and build metadata; it does not validate the "no leading zeroes" constraint for pre-release + extraMatchRE = regexp.MustCompile(`^(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?\s*$`) +) + +func parse(str string, semver bool) (*Version, error) { + parts := versionMatchRE.FindStringSubmatch(str) + if parts == nil { + return nil, fmt.Errorf("could not parse %q as version", str) + } + numbers, extra := parts[1], parts[2] + + components := strings.Split(numbers, ".") + if (semver && len(components) != 3) || (!semver && len(components) < 2) { + return nil, fmt.Errorf("illegal version string %q", str) + } + + v := &Version{ + components: make([]uint, len(components)), + semver: semver, + } + for i, comp := range components { + if (i == 0 || semver) && strings.HasPrefix(comp, "0") && comp != "0" { + return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) + } + num, err := strconv.ParseUint(comp, 10, 0) + if err != nil { + return nil, fmt.Errorf("illegal non-numeric version component %q in %q: %v", comp, str, err) + } + v.components[i] = uint(num) + } + + if semver && extra != "" { + extraParts := extraMatchRE.FindStringSubmatch(extra) + if extraParts == nil { + return nil, fmt.Errorf("could not parse pre-release/metadata (%s) in version %q", extra, str) + } + v.preRelease, v.buildMetadata = extraParts[1], extraParts[2] + + for _, comp := range strings.Split(v.preRelease, ".") { + if _, err := strconv.ParseUint(comp, 10, 0); err == nil { + if strings.HasPrefix(comp, "0") && comp != "0" { + return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) + } + } + } + } + + return v, nil +} + +// ParseGeneric parses a "generic" version string. The version string must consist of two +// or more dot-separated numeric fields (the first of which can't have leading zeroes), +// followed by arbitrary uninterpreted data (which need not be separated from the final +// numeric field by punctuation). For convenience, leading and trailing whitespace is +// ignored, and the version can be preceded by the letter "v". See also ParseSemantic. +func ParseGeneric(str string) (*Version, error) { + return parse(str, false) +} + +// MustParseGeneric is like ParseGeneric except that it panics on error +func MustParseGeneric(str string) *Version { + v, err := ParseGeneric(str) + if err != nil { + panic(err) + } + return v +} + +// ParseSemantic parses a version string that exactly obeys the syntax and semantics of +// the "Semantic Versioning" specification (http://semver.org/) (although it ignores +// leading and trailing whitespace, and allows the version to be preceded by "v"). For +// version strings that are not guaranteed to obey the Semantic Versioning syntax, use +// ParseGeneric. 
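+//
+// For example (illustrative inputs): ParseSemantic("v1.2.3-beta.1") succeeds, while
+// ParseSemantic("1.2") returns an error, since semantic versions require exactly
+// three numeric components.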
+func ParseSemantic(str string) (*Version, error) { + return parse(str, true) +} + +// MustParseSemantic is like ParseSemantic except that it panics on error +func MustParseSemantic(str string) *Version { + v, err := ParseSemantic(str) + if err != nil { + panic(err) + } + return v +} + +// Major returns the major release number +func (v *Version) Major() uint { + return v.components[0] +} + +// Minor returns the minor release number +func (v *Version) Minor() uint { + return v.components[1] +} + +// Patch returns the patch release number if v is a Semantic Version, or 0 +func (v *Version) Patch() uint { + if len(v.components) < 3 { + return 0 + } + return v.components[2] +} + +// BuildMetadata returns the build metadata, if v is a Semantic Version, or "" +func (v *Version) BuildMetadata() string { + return v.buildMetadata +} + +// PreRelease returns the prerelease metadata, if v is a Semantic Version, or "" +func (v *Version) PreRelease() string { + return v.preRelease +} + +// Components returns the version number components +func (v *Version) Components() []uint { + return v.components +} + +// WithMajor returns copy of the version object with requested major number +func (v *Version) WithMajor(major uint) *Version { + result := *v + result.components = []uint{major, v.Minor(), v.Patch()} + return &result +} + +// WithMinor returns copy of the version object with requested minor number +func (v *Version) WithMinor(minor uint) *Version { + result := *v + result.components = []uint{v.Major(), minor, v.Patch()} + return &result +} + +// WithPatch returns copy of the version object with requested patch number +func (v *Version) WithPatch(patch uint) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), patch} + return &result +} + +// WithPreRelease returns copy of the version object with requested prerelease +func (v *Version) WithPreRelease(preRelease string) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), v.Patch()} + result.preRelease = preRelease + return &result +} + +// WithBuildMetadata returns copy of the version object with requested buildMetadata +func (v *Version) WithBuildMetadata(buildMetadata string) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), v.Patch()} + result.buildMetadata = buildMetadata + return &result +} + +// String converts a Version back to a string; note that for versions parsed with +// ParseGeneric, this will not include the trailing uninterpreted portion of the version +// number. 
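+//
+// For example (illustrative input), MustParseGeneric("1.4.7-beta").String()
+// returns "1.4.7".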
+func (v *Version) String() string { + if v == nil { + return "" + } + var buffer bytes.Buffer + + for i, comp := range v.components { + if i > 0 { + buffer.WriteString(".") + } + buffer.WriteString(fmt.Sprintf("%d", comp)) + } + if v.preRelease != "" { + buffer.WriteString("-") + buffer.WriteString(v.preRelease) + } + if v.buildMetadata != "" { + buffer.WriteString("+") + buffer.WriteString(v.buildMetadata) + } + + return buffer.String() +} + +// compareInternal returns -1 if v is less than other, 1 if it is greater than other, or 0 +// if they are equal +func (v *Version) compareInternal(other *Version) int { + + vLen := len(v.components) + oLen := len(other.components) + for i := 0; i < vLen && i < oLen; i++ { + switch { + case other.components[i] < v.components[i]: + return 1 + case other.components[i] > v.components[i]: + return -1 + } + } + + // If components are common but one has more items and they are not zeros, it is bigger + switch { + case oLen < vLen && !onlyZeros(v.components[oLen:]): + return 1 + case oLen > vLen && !onlyZeros(other.components[vLen:]): + return -1 + } + + if !v.semver || !other.semver { + return 0 + } + + switch { + case v.preRelease == "" && other.preRelease != "": + return 1 + case v.preRelease != "" && other.preRelease == "": + return -1 + case v.preRelease == other.preRelease: // includes case where both are "" + return 0 + } + + vPR := strings.Split(v.preRelease, ".") + oPR := strings.Split(other.preRelease, ".") + for i := 0; i < len(vPR) && i < len(oPR); i++ { + vNum, err := strconv.ParseUint(vPR[i], 10, 0) + if err == nil { + oNum, err := strconv.ParseUint(oPR[i], 10, 0) + if err == nil { + switch { + case oNum < vNum: + return 1 + case oNum > vNum: + return -1 + default: + continue + } + } + } + if oPR[i] < vPR[i] { + return 1 + } else if oPR[i] > vPR[i] { + return -1 + } + } + + switch { + case len(oPR) < len(vPR): + return 1 + case len(oPR) > len(vPR): + return -1 + } + + return 0 +} + +// returns false if array contain any non-zero element +func onlyZeros(array []uint) bool { + for _, num := range array { + if num != 0 { + return false + } + } + return true +} + +// AtLeast tests if a version is at least equal to a given minimum version. If both +// Versions are Semantic Versions, this will use the Semantic Version comparison +// algorithm. Otherwise, it will compare only the numeric components, with non-present +// components being considered "0" (ie, "1.4" is equal to "1.4.0"). +func (v *Version) AtLeast(min *Version) bool { + return v.compareInternal(min) != -1 +} + +// LessThan tests if a version is less than a given version. (It is exactly the opposite +// of AtLeast, for situations where asking "is v too old?" makes more sense than asking +// "is v new enough?".) +func (v *Version) LessThan(other *Version) bool { + return v.compareInternal(other) == -1 +} + +// Compare compares v against a version string (which will be parsed as either Semantic +// or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if +// it is greater than other, or 0 if they are equal. +func (v *Version) Compare(other string) (int, error) { + ov, err := parse(other, v.semver) + if err != nil { + return 0, err + } + return v.compareInternal(ov), nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/log/doc.go b/vendor/sigs.k8s.io/kind/pkg/log/doc.go new file mode 100644 index 000000000..ffda513b9 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/log/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package log defines a logging interface that kind uses +// This is roughly a minimal subset of klog github.com/kubernetes/klog +package log diff --git a/vendor/sigs.k8s.io/kind/pkg/log/noop.go b/vendor/sigs.k8s.io/kind/pkg/log/noop.go new file mode 100644 index 000000000..f5a20ee69 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/log/noop.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +// NoopLogger implements the Logger interface and never logs anything +type NoopLogger struct{} + +// Warn meets the Logger interface but does nothing +func (n NoopLogger) Warn(message string) {} + +// Warnf meets the Logger interface but does nothing +func (n NoopLogger) Warnf(format string, args ...interface{}) {} + +// Error meets the Logger interface but does nothing +func (n NoopLogger) Error(message string) {} + +// Errorf meets the Logger interface but does nothing +func (n NoopLogger) Errorf(format string, args ...interface{}) {} + +// V meets the Logger interface but does nothing +func (n NoopLogger) V(level Level) InfoLogger { return NoopInfoLogger{} } + +// NoopInfoLogger implements the InfoLogger interface and never logs anything +type NoopInfoLogger struct{} + +// Enabled meets the InfoLogger interface but always returns false +func (n NoopInfoLogger) Enabled() bool { return false } + +// Info meets the InfoLogger interface but does nothing +func (n NoopInfoLogger) Info(message string) {} + +// Infof meets the InfoLogger interface but does nothing +func (n NoopInfoLogger) Infof(format string, args ...interface{}) {} diff --git a/vendor/sigs.k8s.io/kind/pkg/log/types.go b/vendor/sigs.k8s.io/kind/pkg/log/types.go new file mode 100644 index 000000000..875eabdf4 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/log/types.go @@ -0,0 +1,66 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package log
+
+// Level is a verbosity logging level for Info logs
+// See also https://github.com/kubernetes/klog
+type Level int32
+
+// Logger defines the logging interface kind uses
+// It is roughly a subset of github.com/kubernetes/klog
+type Logger interface {
+	// Warn should be used to write user facing warnings
+	Warn(message string)
+	// Warnf should be used to write Printf style user facing warnings
+	Warnf(format string, args ...interface{})
+	// Error may be used to write an error message when it occurs
+	// Prefer returning an error instead in most cases
+	Error(message string)
+	// Errorf may be used to write a Printf style error message when it occurs
+	// Prefer returning an error instead in most cases
+	Errorf(format string, args ...interface{})
+	// V() returns an InfoLogger for a given verbosity Level
+	//
+	// Normal verbosity levels:
+	// V(0): normal user facing messages go to V(0)
+	// V(1): debug messages start at V(N > 0); these should be high level
+	// V(2): more detailed log messages
+	// V(3+): trace level logging, in increasing "noisiness", allowing
+	// arbitrarily detailed logging at extremely low cost unless the
+	// logger has actually been configured to display these (e.g. via the -v
+	// command line flag)
+	//
+	// It is expected that the returned InfoLogger will be extremely cheap
+	// to interact with for a Level greater than the enabled level
+	V(Level) InfoLogger
+}
+
+// InfoLogger defines the info logging interface kind uses
+// It is roughly a subset of Verbose from github.com/kubernetes/klog
+type InfoLogger interface {
+	// Info is used to write a user facing status message
+	//
+	// See: Logger.V
+	Info(message string)
+	// Infof is used to write a Printf style user facing status message
+	Infof(format string, args ...interface{})
+	// Enabled should return true if this verbosity level is enabled
+	// on the Logger
+	//
+	// See: Logger.V
+	Enabled() bool
+}
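These two interfaces are all that kind requires of a logger. A minimal sketch of a conforming implementation follows; the stderrLogger and stderrInfo names are illustrative only, not part of kind, while the sigs.k8s.io/kind/pkg/log import and the NoopInfoLogger type are real:

package main

import (
	"fmt"
	"os"

	"sigs.k8s.io/kind/pkg/log"
)

// stderrLogger is a hypothetical Logger that writes everything to stderr
// and shows Info messages up to a fixed verbosity.
type stderrLogger struct {
	verbosity log.Level
}

func (l stderrLogger) Warn(message string) { fmt.Fprintln(os.Stderr, "WARN: "+message) }

func (l stderrLogger) Warnf(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, "WARN: "+format+"\n", args...)
}

func (l stderrLogger) Error(message string) { fmt.Fprintln(os.Stderr, "ERROR: "+message) }

func (l stderrLogger) Errorf(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, "ERROR: "+format+"\n", args...)
}

// V returns a live InfoLogger only for enabled levels; disabled levels get
// the package's NoopInfoLogger, which keeps them essentially free, as the
// interface contract asks.
func (l stderrLogger) V(level log.Level) log.InfoLogger {
	if level > l.verbosity {
		return log.NoopInfoLogger{}
	}
	return stderrInfo{}
}

// stderrInfo is the enabled InfoLogger counterpart.
type stderrInfo struct{}

func (stderrInfo) Enabled() bool       { return true }
func (stderrInfo) Info(message string) { fmt.Fprintln(os.Stderr, message) }

func (stderrInfo) Infof(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, format+"\n", args...)
}

func main() {
	var logger log.Logger = stderrLogger{verbosity: 1}
	logger.V(0).Info("user facing message")  // printed
	logger.V(3).Infof("trace detail %d", 42) // dropped: 3 > verbosity
}

For callers that want no output at all, NoopLogger from noop.go is the natural default value of a log.Logger field.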