diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5277dab73..bf267e963 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -56,3 +56,12 @@ jobs: env: GO111MODULE: "on" run: make test-unit + + - name: Running integration tests workloadcluster + env: + GIT_PROVIDER: github + GIT_ORG_NAME: SovereignCloudStack + GIT_REPOSITORY_NAME: cluster-stacks + GO111MODULE: "on" + GIT_ACCESS_TOKEN: ${{ secrets.GIT_ACCESS_TOKEN }} + run: make test-integration-workloadcluster \ No newline at end of file diff --git a/Makefile b/Makefile index e758f48fd..40524bca0 100644 --- a/Makefile +++ b/Makefile @@ -313,17 +313,27 @@ $(WORKER_CLUSTER_KUBECONFIG): KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env --bin-dir $(abspath $(TOOLS_BIN_DIR)) -p path $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION)) +.PHONY: test-integration +test-integration: test-integration-workloadcluster test-integration-github + echo done + .PHONY: test-unit test-unit: $(SETUP_ENVTEST) $(GOTESTSUM) ## Run unit @mkdir -p $(shell pwd)/.coverage - KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" $(GOTESTSUM) --junitfile=.coverage/junit.xml --format testname -- -mod=vendor \ + CREATE_KIND_CLUSTER=true KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" $(GOTESTSUM) --junitfile=.coverage/junit.xml --format testname -- -mod=vendor \ -covermode=atomic -coverprofile=.coverage/cover.out -p=4 ./internal/controller/... +.PHONY: test-integration-workloadcluster +test-integration-workloadcluster: $(SETUP_ENVTEST) $(GOTESTSUM) + @mkdir -p $(shell pwd)/.coverage + CREATE_KIND_CLUSTER=true KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" $(GOTESTSUM) --junitfile=.coverage/junit.xml --format testname -- -mod=vendor \ + -covermode=atomic -coverprofile=.coverage/cover.out -p=1 ./internal/test/integration/workloadcluster/... 
+ .PHONY: test-integration-github test-integration-github: $(SETUP_ENVTEST) $(GOTESTSUM) @mkdir -p $(shell pwd)/.coverage - KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" $(GOTESTSUM) --junitfile=../.coverage/junit.xml --format testname -- -mod=vendor \ - -covermode=atomic -coverprofile=../.coverage/cover.out -p=1 ./internal/test/integration/github/... + CREATE_KIND_CLUSTER=false KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" $(GOTESTSUM) --junitfile=.coverage/junit.xml --format testname -- -mod=vendor \ + -covermode=atomic -coverprofile=.coverage/cover.out -p=1 ./internal/test/integration/github/... ##@ Verify ########## diff --git a/README.md b/README.md index 5209d56b2..c833f4e5d 100644 --- a/README.md +++ b/README.md @@ -8,86 +8,27 @@ The operator can be used with any repository that contains releases of cluster s To try out this operator and cluster stacks, have a look at this [demo](https://github.com/SovereignCloudStack/cluster-stacks-demo). -## What is the Cluster Stack Operator? +## Why Cluster Stacks? -The Cluster Stack Operator facilitates the manual work that needs to be done to use cluster stacks. +Kubernetes and Cluster API enable self-service Kubernetes. But do they take care of everything? No! Both tools solve one specific purpose perfectly and leave other tasks out of scope. -There are three components of a cluster stack: +Therefore, a user has to answer questions like these: how do I get node images? How can I manage core cluster components (e.g. CCM, CNI)? How can I safely and efficiently upgrade Kubernetes clusters? -1. Cluster addons: The cluster addons (CNI, CSI, CCM) have to be applied in each workload cluster that the user starts -2. Cluster API objects: The `ClusterClass` object makes it easier to use Cluster-API. The cluster stack contains a `ClusterClass` object and other Cluster-API objects that are necessary in order to use the `ClusterClass`. These objects have to be applied in the management cluster. -3. 
Node images: Node images can be provided to the user in different form. They are released and tested together with the other two components of the cluster stack. +The Cluster Stacks give an answer by working hand-in-hand with Cluster API to facilitate self-service Kubernetes. They provide a framework and tools for managing a fully open-source self-service Kubernetes infrastructure with ease. They integrate seamlessly in the Cluster API cosmos. -The first two are handled by this operator here. The node images, on the other hand, have to be handled by separate provider integrations, similar to the ones that [Cluster-API uses](https://cluster-api.sigs.k8s.io/developer/providers/implementers-guide/overview). +The Cluster Stack operator enables an “Infrastructure as Software” approach for managing Kubernetes clusters in self-service. -## Implementing a provider integration +The Cluster Stacks are very generic and can be adapted to many use cases. -Further information and documentation on how to implement a provider integration will follow soon. +### Are Cluster Stacks relevant to you? -## Developing Cluster Stack Operator +Are you interested in setting up Kubernetes in your company based on open-source software? Do you not want to rely on other providers but own your Kubernetes clusters? Do you want to manage Kubernetes clusters for others? Do you plan on using Cluster API? -Developing our operator is quite easy. First, you need to install some base requirements: Docker and Go. Second, you need to configure your environment variables. Then you can start developing with the local Kind cluster and the Tilt UI to create a workload cluster that is already pre-configured. +In all of these cases, the Cluster Stacks are for you! -## Setting Tilt up -1. Install Docker and Go. We expect you to run on a Linux OS. -2. Create an ```.envrc``` file and specify the values you need. See the .envrc.sample for details. 
+They make it easy to build a self-service Kubernetes infrastructure for internal use, as well as to create a Managed Kubernetes offering. -## Developing with Tilt -

-tilt -

+## Further documentation -Operator development requires a lot of iteration, and the “build, tag, push, update deployment” workflow can be very tedious. Tilt makes this process much simpler by watching for updates and automatically building and deploying them. To build a kind cluster and to start Tilt, run: - -```shell -make tilt-up -``` -> To access the Tilt UI please go to: `http://localhost:10350` - - -You should make sure that everything in the UI looks green. If not, e.g. if the clusterstack has not been synced, you can trigger the Tilt workflow again. In case of the clusterstack button this might be necessary, as it cannot be applied right after startup of the cluster and fails. Tilt unfortunately does not include a waiting period. - -If everything is green, then you can already check for your clusterstack that has been deployed. You can use a tool like k9s to have a look at the management cluster and its custom resources. - -In case your clusterstack shows that it is ready, you can deploy a workload cluster. This could be done through the Tilt UI, by pressing the button in the top right corner "Create Workload Cluster". This triggers the `make create-workload-cluster-docker`, which uses the environment variables and the cluster-template. - -In case you want to change some code, you can do so and see that Tilt triggers on save. It will update the container of the operator automatically. - -If you want to change something in your ClusterStack or Cluster custom resources, you can have a look at `.cluster.yaml` and `.clusterstack.yaml`, which Tilt uses. - -To tear down the workload cluster press the "Delete Workload Cluster" button. After a few minutes the resources should be deleted. - -To tear down the kind cluster, use: - -```shell -$ make delete-bootstrap-cluster -``` - -If you have any trouble finding the right command, then you can use `make help` to get a list of all available make targets. 
- -## Troubleshooting - -Check the latest events: - -```shell -kubectl get events -A --sort-by=.lastTimestamp -``` - -Check the conditions: - -```shell -go run github.com/guettli/check-conditions@latest all -``` - -Check with `clusterctl`: - -```shell -clusterctl describe cluster -n cluster my-cluster -``` - -Check the logs. List all logs from all deployments. Show the logs of the last ten minutes: - -```shell -kubectl get deployment -A --no-headers | while read -r ns d _; do echo; echo "====== $ns $d"; kubectl logs --since=10m -n $ns deployment/$d; done -``` +Please have a look at our [docs](docs/README.md) to find more information about the architecture, how to get started, how to develop this operator or provider integrations, and much more. \ No newline at end of file diff --git a/api/v1alpha1/clusterstack_types.go b/api/v1alpha1/clusterstack_types.go index 2cad61a94..e36c3cb5a 100644 --- a/api/v1alpha1/clusterstack_types.go +++ b/api/v1alpha1/clusterstack_types.go @@ -39,7 +39,7 @@ type ClusterStackSpec struct { // Channel specifies the release channel of the cluster stack. Defaults to 'stable'. // +kubebuilder:default:=stable - // +kubebuilder:validation:enum=stable;alpha;beta;rc + // +kubebuilder:validation:Enum=stable;custom Channel version.Channel `json:"channel,omitempty"` // Versions is a list of version of the cluster stack that should be available in the management cluster. diff --git a/config/crd/bases/clusterstack.x-k8s.io_clusterstacks.yaml b/config/crd/bases/clusterstack.x-k8s.io_clusterstacks.yaml index dd113a523..c608c50db 100644 --- a/config/crd/bases/clusterstack.x-k8s.io_clusterstacks.yaml +++ b/config/crd/bases/clusterstack.x-k8s.io_clusterstacks.yaml @@ -75,6 +75,9 @@ spec: default: stable description: Channel specifies the release channel of the cluster stack. Defaults to 'stable'. 
+ enum: + - stable + - custom type: string kubernetesVersion: description: KubernetesVersion is the Kubernetes version in the format diff --git a/config/cso/cluster.yaml b/config/cso/cluster.yaml index dc88c05f8..fe4ba6451 100644 --- a/config/cso/cluster.yaml +++ b/config/cso/cluster.yaml @@ -11,7 +11,7 @@ spec: cidrBlocks: ["192.168.0.0/16"] serviceDomain: "cluster.local" topology: - class: docker-ferrol-1-27-v1 + class: docker-ferrol-1-27-v2 controlPlane: metadata: {} replicas: 1 diff --git a/config/cso/clusterstack.yaml b/config/cso/clusterstack.yaml index 75432b07d..035e059d2 100644 --- a/config/cso/clusterstack.yaml +++ b/config/cso/clusterstack.yaml @@ -11,4 +11,4 @@ spec: autoSubscribe: false noProvider: true versions: - - v1 \ No newline at end of file + - v2 \ No newline at end of file diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..0cce86875 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,31 @@ +# Documentation Index + +## General +- [Concept](concept.md) +- [Terminology](terminology.md) + +## Quickstart +- [Quickstart](topics/quickstart.md) +- [Cluster API quick start](https://cluster-api.sigs.k8s.io/user/quick-start.html) + +### Architecture +- [Overview](architecture/overview.md) +- [User flow](architecture/user-flow.md) +- [Workflow - Node images](architecture/node-image-flow.md) +- [Workflow - Management Cluster](architecture/mgt-cluster-flow.md) +- [Workflow - Workload Cluster](architecture/workload-cluster-flow.md) + +### Topics +- [Managing ClusterStack resources](topics/managing-clusterstacks.md) +- [Upgrade flow](topics/upgrade-flow.md) +- [Troubleshooting](topics/troubleshoot.md) + +### Developing +- [Development guide](develop/develop.md) +- [Develop provider integrations](develop/provider-integration.md) + +### Reference +- [General](reference/README.md) +- [ClusterStack](reference/clusterstack.md) +- [ClusterStackRelease](reference/clusterstackrelease.md) +- [ClusterAddon](reference/clusteraddon.md) \ No 
newline at end of file diff --git a/docs/architecture/mgt-cluster-flow.md b/docs/architecture/mgt-cluster-flow.md new file mode 100644 index 000000000..41e53ea48 --- /dev/null +++ b/docs/architecture/mgt-cluster-flow.md @@ -0,0 +1,11 @@ +# Management Cluster flow + +In a Cluster API management cluster, the Cluster API operators run. In our management cluster, there are also the Cluster Stack operators. + +The user controls workload clusters via custom resources. As the Cluster Stack approach uses `ClusterClasses`, the user has to create only a `Cluster` object and refer to a `ClusterClass`. + +However, in order for this to work, the `ClusterClass` has to be applied as well as all other Cluster API objects that are referenced by the `ClusterClass`, such as `MachineTemplates`, etc. + +These Cluster API objects are packaged in a Helm Chart that is part of every cluster stack. The clusterstackrelease-controller is responsible for applying this Helm chart, which is done by first calling `helm template` and then the "apply" method of the Kubernetes go-client. + +The main resource is always the `ClusterClass` that follows a very specific naming pattern and is called in the exact same way as the `ClusterStackRelease` object that manages it. For example, `docker-ferrol-1-27-v1`, which refers to all defining properties of a specific release of a cluster stack for a certain provider. \ No newline at end of file diff --git a/docs/architecture/node-image-flow.md b/docs/architecture/node-image-flow.md new file mode 100644 index 000000000..bfa188768 --- /dev/null +++ b/docs/architecture/node-image-flow.md @@ -0,0 +1,4 @@ +# Node image flow + +The node image flow depends on each provider. There are various ways in which providers allow the use of custom images. We have documented the options in the [cluster stacks repo](https://github.com/SovereignCloudStack/cluster-stacks#film_strip-node-images). 
+ diff --git a/docs/architecture/overview.md b/docs/architecture/overview.md new file mode 100644 index 000000000..d9e842dec --- /dev/null +++ b/docs/architecture/overview.md @@ -0,0 +1,65 @@ +# Architecture + +![Cluster Stacks](../pics/syself-cluster-stacks-web.png) + +## Cluster stacks + +The cluster stacks are opinionated templates of clusters in which all configuration and all core components are defined. They can be implemented on any provider. + +There can be multiple cluster stacks that acknowledge the many ways in which a cluster can be set up. There is no right or wrong and cluster stacks make sure that the flexibility is not lost. + +At the same time, they offer ready-made templates for users, who do not have to spend a lot of thought on how to build clusters so that everything works well together. + +Cluster stacks are implemented by two Helm charts. The first one contains all Cluster API objects and is applied in the management cluster. The second Helm chart contains the cluster addons, i.e. the core components every cluster needs, and is installed in the workload clusters. + +Furthermore, there are node images that can look quite different depending on the provider. + +To sum up, there are three components of a cluster stack: + +1. Cluster addons: The cluster addons (CNI, CSI, CCM) have to be applied in each workload cluster that the user starts +2. Cluster API objects: The `ClusterClass` object makes it easier to use Cluster-API. The cluster stack contains a `ClusterClass` object and other Cluster-API objects that are necessary in order to use the `ClusterClass`. These objects have to be applied in the management cluster. +3. Node images: Node images can be provided to the user in different form. They are released and tested together with the other two components of the cluster stack. + +More information about cluster stacks and their three parts can be found in https://github.com/SovereignCloudStack/cluster-stacks/blob/main/README.md. 
+ +## Cluster Stack Operator + +The Cluster Stack Operator takes care of all steps that have to be done in order to use a certain cluster stack implementation. + +It has to be installed in the management cluster and can be interacted with by applying custom resources. It extends the functionality of the Cluster API operators. + +The Cluster Stack Operator mainly applies the two Helm charts from a cluster stack implementation. It is also able to automatically fetch a remote Github repository to see whether there are new releases of a certain cluster stack. + +The first and second component of a cluster stack are handled by the Cluster Stack Operator. + +The node images, on the other hand, have to be handled by separate provider integrations, similar to the ones that [Cluster-API uses](https://cluster-api.sigs.k8s.io/developer/providers/implementers-guide/overview). + +## Cluster Stack Provider Integrations + +The Cluster Stack Operator is accompanied by Cluster Stack Provider Integrations. A provider integration is also an operator that works together with the Cluster Stack Operator in a specific way, which is described in the docs about building [provider integrations](../develop/provider-integration.md). + +A provider integration makes sure that the node images are taken care of and made available to the user. + +If there is no work to be done for node images, then the Cluster Stack Operator can work in `noProvider` mode and this Cluster Stack Provider Integration can be omitted. + +## Steps to make cluster stacks ready to use + +There are many steps that are needed in order to make cluster stacks ready to use. In order to understand the full flow better and to get an idea of how much work there is and how many personas are involved, we will give an overview of how to start from scratch with a new cluster stack and provider. + +We will assume that this operator exists, but that you want to use a new cluster stack and provider. 
+ +### Defining a cluster stack + +First, you need to define your cluster stack. Which cluster addons do you need? How do your node images look like? You need to take these decisions and write them down. + +### Implementing a cluster stack + +The next step is to implement your cluster stack for your provider. You can take existing implementations as reference, but need to think of how the provider-specific custom resources are called and how the respective Cluster API Provider Integration works. + +### Implementing a Cluster Stack Provider Integration + +We assume that you need to do some manual tasks in order to make node images accessible on your provider. These steps should be implemented in a Cluster Stack Provider Integration, which of course has to work together with the details of how you implemented your cluster stack. + +### Using everything + +Finally, you can use the new cluster stack you defined and implemented on the infrastructure of your provider. Enjoy! \ No newline at end of file diff --git a/docs/architecture/user-flow.md b/docs/architecture/user-flow.md new file mode 100644 index 000000000..46a5c37d6 --- /dev/null +++ b/docs/architecture/user-flow.md @@ -0,0 +1,44 @@ +# Deep dive: User flow + +It is essential to understand the flow of what you have to do as a user and what happens in the background. + +The [Quickstart guide](quickstart.md) goes over all small steps you have to do to. If you are just interested in getting started, then have a look there. + +In the following, we will not go into the detail of every command, but will focus more on a high-level of what you have to do and of what happens in the background. + +## Steps to create a workload cluster + +### Get the right cluster stacks + +The first step would be to make sure that you have the cluster stacks implemented that you want to use. Usually, you will use cluster stacks that have been implemented by others for the provider that you want to use. 
However, you can also build your own cluster stacks. + +### Apply cluster stack resource + +If you have everything available, you can start your management cluster / bootstrap cluster. In this cluster, you have to apply the `ClusterStack` custom resource with your individual desired configuration. + +Depending on your configuration, you will have to wait until all steps are done in the background. + +The operator will perform all necessary steps to provide you with node images. If all node images are ready, it will apply the Cluster API resources that are required. + +At the end, you will have node images and Cluster API objects ready to use. There is only one step more to create a cluster. + +### Use the ClusterClasses + + That the previous step is done, you can see in the status of the `ClusterStack` object. However, you can also just check if you have certain `ClusterClass` objects. The `ClusterClass` objects will be applied by the Cluster Stack Operator as well. They follow a certain naming pattern. If you have the cluster stack "ferrol" for the docker provider and Kubernetes version 1.27 in version "v1", then you'll see a `ClusterClass` that has the name "docker-ferrol-1-27-v1". + + You can use this `ClusterClass` by referencing it in a `Cluster` object. For details, you can check out the official Cluster-API documentation. + +### Wait until cluster addons are ready + +If you created a workload cluster by applying a `Cluster` object, the cluster addons will be applied automatically. You just have to wait until everything is ready, e.g. that the CCM or CNI are installed. + +## Recap - how do Cluster API and Cluster Stacks work together? + +The user triggers the flow by configuring and applying a `ClusterStack` custom resource. This will trigger some work in the background, to make node images and Cluster API objects ready to use. + +This process is completed, when a `ClusterClass` with a certain name is created. 
This `ClusterClass` resource is used in order to create as many clusters as you want that look like the template specified in the `ClusterClass`. + +Upgrades of clusters are done by changing the reference to a new `ClusterClass`, e.g. from `docker-ferrol-1-27-v1` to `docker-ferrol-1-27-v2`. + +To sum up: The Cluster Stack Operator takes care of steps that you would otherwise have to do manually. It does not change anything in the normal Cluster API flow, except that it enforces the use of `ClusterClasses`. + diff --git a/docs/architecture/workload-cluster-flow.md b/docs/architecture/workload-cluster-flow.md new file mode 100644 index 000000000..bb29db782 --- /dev/null +++ b/docs/architecture/workload-cluster-flow.md @@ -0,0 +1,27 @@ +# The workload cluster flow + +The workload cluster flow is implemented by two controllers and one custom resource. + +The `ClusterAddon` resource gets created by the ClusterAddonCreate controller for any `Cluster` resource that is applied. + +The user never interacts with the `ClusterAddon` resource as it is created, updated, and deleted automatically. + +It is updated by the ClusterAddon controller, which makes sure that all cluster addons are applied in the respective workload cluster. + +The controller follows a simple pattern. When a cluster is created, it waits until the cluster is ready. If that is the case, it applies all objects from the ClusterAddon Helm Chart. + +If a cluster is updated, it checks whether there has been an update of the cluster addons and only if that's the case, it applies the objects again. It also deletes objects that have been there in the previous version but are not there anymore. + +Applying the objects has one additional step: we take the idea of the cluster-api-addon-provider-helm and add a few details about the `Cluster` and the `ProviderCluster` in there (https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/blob/main/internal/value_substitutions.go). 
+ +This is necessary, because normal templating could not inject these values that are only available at runtime but that are very important to the resources that we apply as cluster addons. + + As this controller relies on the release assets to be downloaded - as do other controllers that do not download anything themselves - there is one issue after a container restart that we have to solve: + +If the container restarts, then everything that was stored in memory or without external volume in the container, will be lost. Therefore, a container restart requires to fetch from Github again. + +This takes a bit of time, even if it is just one second. If a `ClusterAddon` reconciles within this one second, it will realize though, that the desired file is not available yet. Instead of throwing an error, we can intelligently requeue again. + +The same pattern is followed in all other controllers as well, if needed. + +This controller also sets intelligent conditions into the status of the objects to make sure that the user can understand what is going on. \ No newline at end of file diff --git a/docs/concept.md b/docs/concept.md new file mode 100644 index 000000000..a194c48cb --- /dev/null +++ b/docs/concept.md @@ -0,0 +1,45 @@ + +# Understanding the concept of Cluster Stacks + +The Cluster Stack framework was developed as one of the building blocks of an open-source Kubernetes-as-a-Service. The goal was to make it easier and more user-friendly to manage Kubernetes and Cluster API. + +## Cluster Stacks and Cluster API + +Do Cluster Stacks replace Cluster API? No! Do Cluster Stacks use Cluster API internally? No! The Cluster Stack framework accompanies Cluster API, but is on the same level of the hierarchy. The Cluster Stack approach does not wrap Cluster API into something else, but adds a few tools next to it. The Cluster Stacks are meant to take over some tasks that are relevant to the user, but for which Cluster API has no opinion. 
+ +As a user of Cluster Stacks, you will see that you have an opinionated way of using Cluster API, for example by enforcing the use of ClusterClasses, but in the end, it is still vanilla Cluster API. However, instead of, for example, having to manage core cluster components, the so-called cluster addons, yourself, the cluster-stack-operator takes care of this. + +By installing the required CRDs as well as the cluster-stack-operator next to the Cluster API CRDs and operators into the Cluster API management cluster, you can start using the Cluster Stacks! + +To sum up: everything you know about Cluster API still applies when using the Cluster Stack Framework! + +## Why cluster stacks? + +Cluster stacks solve multiple issues users face when using Cluster API. Here is a selection of them: + +- Cluster API assumes that node images are available. This might mean some manual work for the user, which is completely out of scope for Cluster API. +- Cluster API does not have a stable solution to manage core cluster components that every workload cluster needs (cloud controller manager, container network interface, etc.). There is some work around so-called "add-on providers", but this is a very recent development. +- Upgrading clusters is challenging, as there might be incompatibilities between the various components (configurations, applications, etc.). Many users don't regularly upgrade clusters because of that. +- Cluster API has some downsides with regards to user experience, as there are many different objects that a user has to apply and manage. + +The cluster stack approach tries to solve all of the above issues and tries to connect everything that users need in order to manage a fleet of Kubernetes clusters efficiently and easily. + +At the same time, it acknowledges the ease of Cluster API and uses it as its core component. Instead of re-inventing the wheel, Cluster API is extended with relevant and meaningful additions that improve the user experience. 
+ +## What do cluster stacks NOT try to do? + +Cluster stacks concentrate on providing users with all necessary Cluster API objects in the management cluster, on providing node images (according to the demands of the respective provider), as well as core components of the workload clusters. The cluster stacks aim to provide a way of testing and versioning full templates of clusters. + +However, they also aim to fulfill their purpose in a similar way to Cluster API by concentrating on one very important part and do that very well. + +If there are any other use cases, e.g. installing applications automatically in workload clusters (an observability stack, GitOps tools, etc.), then this is a use case that is outside of the cluster stacks functionality. + +They are not intended to incorporate all features that users might want, but they can easily go hand-in-hand with other tools that enhance Cluster API. + +## Integrating cluster stacks with other tools + +The Cluster API cosmos is large and there are many tools around that can prove useful. Cluster stacks should be compatible with most of the other tools that you might want to use or build, as long as they follow the same pattern of using the declarative approach of having custom resources that are reconciled by operators. + +The cluster stacks can be used via custom resources and an operator reconciling them. Custom resources allow users to extend the Kubernetes API according to their needs. + +If you want to add your own functionality, you can also define your CRDs and write operators to reconcile them. If you think that your idea is very generic and might be interesting for the community in general, then reach out to the SCS team. Together, we will be able to improve the user experience even further! 
diff --git a/docs/develop/develop.md b/docs/develop/develop.md new file mode 100644 index 000000000..ddc6fdb85 --- /dev/null +++ b/docs/develop/develop.md @@ -0,0 +1,42 @@ +# Develop Cluster Stack Operator + + +Developing our operator is quite easy. First, you need to install some base requirements: Docker and Go. Second, you need to configure your environment variables. Then you can start developing with the local Kind cluster and the Tilt UI to create a workload cluster that is already pre-configured. + +## Setting Tilt up +1. Install Docker and Go. We expect you to run on a Linux OS. +2. Create an ```.envrc``` file and specify the values you need. See the .envrc.sample for details. + +## Developing with Tilt + +

+tilt +

+ +Operator development requires a lot of iteration, and the “build, tag, push, update deployment” workflow can be very tedious. Tilt makes this process much simpler by watching for updates and automatically building and deploying them. To build a kind cluster and to start Tilt, run: + +```shell +make tilt-up +``` +> To access the Tilt UI please go to: `http://localhost:10351` + + +You should make sure that everything in the UI looks green. If not, e.g. if the clusterstack has not been synced, you can trigger the Tilt workflow again. In case of the clusterstack button this might be necessary, as it cannot be applied right after startup of the cluster and fails. Tilt unfortunately does not include a waiting period. + +If everything is green, then you can already check for your clusterstack that has been deployed. You can use a tool like k9s to have a look at the management cluster and its custom resources. + +In case your clusterstack shows that it is ready, you can deploy a workload cluster. This could be done through the Tilt UI, by pressing the button in the top right corner "Create Workload Cluster". This triggers the `make create-workload-cluster-docker`, which uses the environment variables and the cluster-template. + +In case you want to change some code, you can do so and see that Tilt triggers on save. It will update the container of the operator automatically. + +If you want to change something in your ClusterStack or Cluster custom resources, you can have a look at `.cluster.yaml` and `.clusterstack.yaml`, which Tilt uses. + +To tear down the workload cluster press the "Delete Workload Cluster" button. After a few minutes the resources should be deleted. + +To tear down the kind cluster, use: + +```shell +$ make delete-bootstrap-cluster +``` + +If you have any trouble finding the right command, then you can use `make help` to get a list of all available make targets. 
diff --git a/docs/develop/provider-integration.md b/docs/develop/provider-integration.md new file mode 100644 index 000000000..e69de29bb diff --git a/docs/pics/syself-cluster-stacks-web.png b/docs/pics/syself-cluster-stacks-web.png new file mode 100644 index 000000000..df4355037 Binary files /dev/null and b/docs/pics/syself-cluster-stacks-web.png differ diff --git a/docs/reference/README.md b/docs/reference/README.md new file mode 100644 index 000000000..2cbc058a6 --- /dev/null +++ b/docs/reference/README.md @@ -0,0 +1,9 @@ +# Object reference + +In this reference, you will find all resources that are defined in this API. Please note that depending on the provider, there are also provider-specific custom resources that are important to use this operator. + +Have a look in the documentation of the respective provider integration to find more details of how the provider-specific custom resources look like and how they can be used. + +The only user-facing CRD of the Cluster Stack Operator is the [ClusterStack](clusterstack.md). Apart from that, there will be, depending on a provider, a `ProviderClusterStackReleaseTemplate`, which gives provider-specific information and has to be applied by the user. + +To see all CRDs, including the ones that are not intended to be applied by users, see [here](https://doc.crds.dev/github.com/SovereignCloudStack/cluster-stack-operator). \ No newline at end of file diff --git a/docs/reference/clusterstack.md b/docs/reference/clusterstack.md new file mode 100644 index 000000000..1e044dfa1 --- /dev/null +++ b/docs/reference/clusterstack.md @@ -0,0 +1,48 @@ +## ClusterStack + +The `ClusterStack` object is the main resource for users to work with. It contains the most important details of a cluster stack and its releases (i.e. certain versions). In its status is the main source of information of the state of everything related to cluster stacks. 
+ + + +### Lifecycle of a ClusterStack + +The `ClusterStack` object has a sub-resource `ClusterStackRelease` for every release that should be provided to the user, either by specifying them manually in the versions array, or automatically through the auto-subscribe functionality. + +The controller reconciles the two sources of information and checks whether for every release that should exist, there is actually one. It also deletes `ClusterStackRelease` objects that are not required anymore. + +Additionally, it fetches information from the `ClusterStackRelease` objects and populates its own state with it. + +In case that a provider integration is used, it will create `ProviderClusterStackRelease` objects in addition to `ClusterStackRelease` objects, based on the `ProviderClusterStackReleaseTemplate` objects given as reference in `spec.providerRef`. + + +### Overview of ClusterStack.Spec + +| Key | Type | Default | Required | Description | +| ------------------------ | --------- | ------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| provider | string | | yes | Name of the provider, e.g. "docker". It is used in various places, e.g. while fetching the respective release assets or while naming resources (ClusterStackReleases, ProviderClusterStackResources, etc.).| +| name | string | | yes | Name of the cluster stack. It is used as well for fetching release assets and other tasks. | +| kubernetesVersion | string | | yes | Kubernetes version in the format '.', e.g. 1.26. Specifies the Kubernetes minor version of the cluster stack that should be taken.| +| channel | string | stable | no | Name of release channel that is used, e.g. stable channel ("v1", "v2", etc.) or beta channel (e.g. 
"v0-beta.1").| +| versions | []string | | no | List of versions that the controller should make available of a cluster stack. Used only in case very specific versions are supposed to be used. Not required if always the latest versions should be made available. | +| autoSubscribe | bool | true | no | Specifies whether the controller should automatically check whether there are new releases of the cluster stack and if so automatically download them. | +| noProvider | bool | false | no | If set to true, the controller does not expect any provider-specific objects and just focuses on applying Cluster API objects in management cluster and cluster addons in all workload clusters. | +| providerRef | object | | no | ProviderRef has to be specified if spec.noProvider is false. It references the ProviderClusterStackReleaseTemplate that contains all information to create the ProviderClusterStackRelease objects. | + +### Example of the ClusterStack object + +You should create one of these objects for each of your bare metal servers that you want to use for your deployment. + +```yaml +apiVersion: clusterstack.x-k8s.io/v1alpha1 +kind: ClusterStack +metadata: + name: clusterstack + namespace: cluster +spec: + provider: docker + name: ferrol + kubernetesVersion: "1.27" + channel: stable + autoSubscribe: true + noProvider: true +``` \ No newline at end of file diff --git a/docs/terminology.md b/docs/terminology.md new file mode 100644 index 000000000..e4d8b48ef --- /dev/null +++ b/docs/terminology.md @@ -0,0 +1,35 @@ +# Terminology + +The Cluster Stacks are a framework and provide tools how to use them. The terminology is not perfect and if you have an idea how to improve it, please reach out. Right now there are the following terms: + +## Cluster Stack framework + +The framework of cluster stacks refers to the fact that cluster stacks of any shape can be created for any provider that supports Cluster API. 
The framework has no opinion about how the Cluster Stacks have to look like, which configuration of node images and Kubernetes you use and which cluster addons you include. + +As long as a cluster stack is implemented and released in a correct way, the cluster-stack-operator will be able to use it, fully independent of the detailed architectural decisions that were taken with regards to how the clusters that come out of this cluster stack should look like. + +This flexibility is meant by the term "framework". + +## A definition of a cluster stack + +A definition of a cluster stack is a document describing, independent of any provider, how a cluster stack xyz should look like. On a very high level, this could be something like "we want to use Ubuntu node images, basic Kubeadm and Cilium as CNI". + +There is no template for such a definition and no pre-defined structure how such a definition should look like. + +A definition of a cluster stack xyz can be used as a base to implement this cluster stack xyz for providers a and b. + +## An implementation of a cluster Stack + +A cluster stack can be implemented for a certain provider. The collection of configuration code, Helm charts, etc. is what we call an implementation of a cluster stack. The release assets that have to be generated from that is what people actually use, usually with the Cluster Stack Operator. + +## Cluster Stack Operator + +A Kubernetes operator that works with the release assets and applies resources in management and each workload cluster. It works together with Provider Integrations, if they are needed. + +## Cluster Stack Provider Integration + +Provider integrations are needed if a user has to do manual steps on a provider to use custom node images for the nodes in a cluster. If no such steps are required, then the provider integration is not needed. + +## ClusterStack as custom resource definition + +The `ClusterStack` is a CRD that the user interacts with directly. 
It shows in its status the state of the respective versions of this cluster stack that the user wants to have. \ No newline at end of file diff --git a/docs/topics/managing-clusterstacks.md b/docs/topics/managing-clusterstacks.md new file mode 100644 index 000000000..caa97e636 --- /dev/null +++ b/docs/topics/managing-clusterstacks.md @@ -0,0 +1,9 @@ +# Managing ClusterStack objects + +The `ClusterStack` object is the central resource that you have to work with. You have to specify a provider, the name of the cluster stack you want to use, as well as the Kubernetes minor version. + +If you want to use multiple different Kubernetes minor versions, you will have to create multiple `ClusterStack` objects. The same goes for multiple providers, or multiple cluster stacks (e.g. ferrol) that might have different features. + +In order to use a cluster stack in a specific version, you have two options: first, you can specify a list of versions in `spec.versions`. Second, you can enable `autoSubscribe`, so that the operator will automatically check for the latest version and make it available to you. + +Usually, you will always want to use auto-subscribe, so that the operator takes care of providing you with the latest versions. diff --git a/docs/topics/quickstart.md b/docs/topics/quickstart.md new file mode 100644 index 000000000..82208cad2 --- /dev/null +++ b/docs/topics/quickstart.md @@ -0,0 +1,5 @@ +# Quickstart + +Currently, there is a [demo](https://github.com/SovereignCloudStack/cluster-stacks-demo) that can be used to see how the Cluster Stack approach can work. It uses the Docker Provider Integration for Cluster API. + +A proper quickstart guide will follow as soon as the OpenStack Provider Integration is completed. 
\ No newline at end of file diff --git a/docs/topics/troubleshoot.md b/docs/topics/troubleshoot.md new file mode 100644 index 000000000..eef5fdf89 --- /dev/null +++ b/docs/topics/troubleshoot.md @@ -0,0 +1,26 @@ + +## Troubleshooting + +Check the latest events: + +```shell +kubectl get events -A --sort-by=.lastTimestamp +``` + +Check the conditions: + +```shell +go run github.com/guettli/check-conditions@latest all +``` + +Check with `clusterctl`: + +```shell +clusterctl describe cluster -n cluster my-cluster +``` + +Check the logs. List all logs from all deployments. Show the logs of the last ten minutes: + +```shell +kubectl get deployment -A --no-headers | while read -r ns d _; do echo; echo "====== $ns $d"; kubectl logs --since=10m -n $ns deployment/$d; done +``` diff --git a/docs/topics/upgrade-flow.md b/docs/topics/upgrade-flow.md new file mode 100644 index 000000000..a4e69e8bf --- /dev/null +++ b/docs/topics/upgrade-flow.md @@ -0,0 +1,18 @@ +# Upgrade flow + +This flow assumes that you have an existing cluster that references a certain `ClusterClass` called `docker-ferrol-1-27-v1`. + +There are two forms of updates: "normal" cluster stack updates, where you would update the above `ClusterClass` to `docker-ferrol-1-27-v2`, and updates of the Kubernetes minor version, e.g. `docker-ferrol-1-28-v1`. + +In both cases, you need to make sure that you have the respective `ClusterClass` available. This works a bit different in the two cases, as you will need a new `ClusterStack` object in the latter case that works with Kubernetes minor version 1.28. This is one of the properties of a `ClusterStack` that you specify in the spec. + +After you made sure that you have the `ClusterClass` ready to which you want to upgrade, you can edit your `Cluster` object according to the following pattern: + +Update `spec.topology.class` to the name of the new `ClusterClass` and change `spec.topology.version` to the respective Kubernetes version. This can be, for example, "1.28.1". 
You have to find out the right Kubernetes version for the respective cluster stack. + +You can either do this by checking the status of the `ClusterStack` object, or by fetching the `ClusterStackRelease` objects. You will find a `ClusterStackRelease` object that has the same name as your desired `ClusterClass`. This object has a property `status.kubernetesVersion` that shows you the version that you need to specify. + +Another option is to check the documentation of the cluster stack to find information about the respective releases. + +Please note that `spec.topology.version` does not have to be specified if a mutating webhook fills the property automatically for you. Check out the latest release notes of the operator to verify whether that is implemented already. + diff --git a/go.mod b/go.mod index 41e446006..ad03d2db7 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,7 @@ require ( sigs.k8s.io/cluster-api v1.5.2 sigs.k8s.io/cluster-api/test v1.5.2 sigs.k8s.io/controller-runtime v0.15.1 + sigs.k8s.io/kind v0.20.0 ) require ( @@ -30,6 +31,7 @@ require ( github.com/Masterminds/semver/v3 v3.2.0 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/Masterminds/squirrel v1.5.3 // indirect + github.com/alessio/shellescape v1.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/containerd/containerd v1.6.18 // indirect @@ -47,6 +49,7 @@ require ( github.com/gobwas/glob v0.2.3 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect @@ -70,6 +73,7 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // 
indirect github.com/opencontainers/image-spec v1.1.0-rc2 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -153,7 +157,7 @@ require ( google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 - gopkg.in/yaml.v3 v3.0.1 // indirect + gopkg.in/yaml.v3 v3.0.1 k8s.io/apiextensions-apiserver v0.27.2 k8s.io/apiserver v0.27.2 // indirect k8s.io/component-base v0.27.2 // indirect diff --git a/go.sum b/go.sum index e44d63ce9..b4dd30dd4 100644 --- a/go.sum +++ b/go.sum @@ -40,6 +40,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -65,6 +66,8 @@ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/O github.com/a8m/expect v1.0.0/go.mod h1:4IwSCMumY49ScypDnjNbYEjgVeqy1/U2cEs3Lat96eA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= 
+github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -123,6 +126,7 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= @@ -308,6 +312,8 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= +github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod 
h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -489,6 +495,7 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nelsam/hel/v2 v2.3.2/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w= github.com/nelsam/hel/v2 v2.3.3/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= @@ -502,6 +509,9 @@ github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= @@ -582,6 +592,7 @@ github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= 
github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -1064,6 +1075,7 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -1132,6 +1144,8 @@ sigs.k8s.io/controller-runtime v0.15.1 h1:9UvgKD4ZJGcj24vefUFgZFP3xej/3igL9BsOUT sigs.k8s.io/controller-runtime v0.15.1/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kind v0.20.0 h1:f0sc3v9mQbGnjBUaqSFST1dwIuiikKVGgoTwpoP33a8= +sigs.k8s.io/kind v0.20.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs= 
sigs.k8s.io/kustomize/api v0.13.2 h1:kejWfLeJhUsTGioDoFNJET5LQe/ajzXhJGYoU+pJsiA= sigs.k8s.io/kustomize/api v0.13.2/go.mod h1:DUp325VVMFVcQSq+ZxyDisA8wtldwHxLZbr1g94UHsw= sigs.k8s.io/kustomize/kyaml v0.14.1 h1:c8iibius7l24G2wVAGZn/Va2wNys03GXLjYVIcFVxKA= diff --git a/hack/kind-dev.sh b/hack/kind-dev.sh index 53b5bab20..7c5937e25 100755 --- a/hack/kind-dev.sh +++ b/hack/kind-dev.sh @@ -24,21 +24,21 @@ REPO_ROOT=$(git rev-parse --show-toplevel) cd "${REPO_ROOT}" || exit 1 # Creates a kind cluster with the ctlptl tool https://github.com/tilt-dev/ctlptl -ctlptl_kind-cluster-with-registry () { - -local CLUSTER_NAME=$1 -local CLUSTER_VERSION=$2 +ctlptl_kind-cluster-with-registry() { -cat </// + // For example: /tmp/downloads/cluster-stacks/docker-ferrol-1-26-v2/ downloadPath = filepath.Join(downloadPath, clusterStackSuffix, tag) - cs, err := clusterstack.NewFromString(tag) + cs, err := clusterstack.NewFromClusterStackReleaseProperties(tag) if err != nil { return Release{}, false, fmt.Errorf("failed to parse cluster stack release: %w", err) } @@ -92,6 +94,18 @@ func New(tag, downloadPath string) (Release, bool, error) { return rel, false, nil } +// ConvertFromClusterClassToClusterStackFormat converts `docker-ferrol-1-27-v0-sha.3960147` way to +// `docker-ferrol-1-27-v0-sha-3960147`. +func ConvertFromClusterClassToClusterStackFormat(input string) string { + parts := strings.Split(input, ".") + + if len(parts) == 2 { + return fmt.Sprintf("%s-%s", parts[0], parts[1]) + } + + return input +} + func ensureMetadata(downloadPath, metadataFileName string) (Metadata, error) { // Read the metadata.yaml file from the release. metadataPath := filepath.Join(downloadPath, metadataFileName) @@ -107,6 +121,25 @@ func ensureMetadata(downloadPath, metadataFileName string) (Metadata, error) { return Metadata{}, fmt.Errorf("failed to unmarshal metadata: %w", err) } + // Normalize the versions of metadata from v1-alpha.1 to v1-alpha-1 format. 
+ metaClusterStackVersion, err := version.New(metadata.Versions.ClusterStack) + if err != nil { + return Metadata{}, fmt.Errorf("failed to parse ClusterStack version from metadata: %w", err) + } + metadata.Versions.ClusterStack = metaClusterStackVersion.String() + + metaClusterAddonVersion, err := version.New(metadata.Versions.Components.ClusterAddon) + if err != nil { + return Metadata{}, fmt.Errorf("failed to parse ClusterAddon version from metadata: %w", err) + } + metadata.Versions.Components.ClusterAddon = metaClusterAddonVersion.String() + + metaNodeImageVersion, err := version.New(metadata.Versions.Components.NodeImage) + if err != nil { + return Metadata{}, fmt.Errorf("failed to parse NodeImage version from metadata: %w", err) + } + metadata.Versions.Components.NodeImage = metaNodeImageVersion.String() + return metadata, nil } @@ -165,13 +198,15 @@ func (r *Release) Validate() error { // clusterAddonChartName returns the helm chart name for cluster addon. func (r *Release) clusterAddonChartName() string { - return fmt.Sprintf("%s-%s-%s-cluster-addon-%s", r.ClusterStack.Provider, r.ClusterStack.Name, r.ClusterStack.KubernetesVersion, r.Meta.Versions.Components.ClusterAddon) + clusterAddonVersion, _ := version.ParseVersionString(r.Meta.Versions.Components.ClusterAddon) + return fmt.Sprintf("%s-%s-%s-cluster-addon-%s", r.ClusterStack.Provider, r.ClusterStack.Name, r.ClusterStack.KubernetesVersion, clusterAddonVersion.StringWithDot()) } // ClusterAddonChartPath returns the helm chart name from the given path. func (r *Release) ClusterAddonChartPath() string { // we ignore the error here, since we already checked for the presence of the chart. - path, _ := r.helmChartNamePath(r.clusterAddonChartName()) + name := r.clusterAddonChartName() + path, _ := r.helmChartNamePath(name) return path } @@ -182,7 +217,7 @@ func (r *Release) ClusterAddonValuesPath() string { // clusterClassChartName returns the helm chart name for cluster class. 
func (r *Release) clusterClassChartName() string { - return fmt.Sprintf("%s-%s-%s-cluster-class-%s", r.ClusterStack.Provider, r.ClusterStack.Name, r.ClusterStack.KubernetesVersion, r.ClusterStack.Version.String()) + return fmt.Sprintf("%s-%s-%s-cluster-class-%s", r.ClusterStack.Provider, r.ClusterStack.Name, r.ClusterStack.KubernetesVersion, r.ClusterStack.Version.StringWithDot()) } // ClusterClassChartPath returns the absolute helm chart path for cluster class. diff --git a/pkg/version/channel.go b/pkg/version/channel.go index 3acc415cd..1dd33c6fa 100644 --- a/pkg/version/channel.go +++ b/pkg/version/channel.go @@ -23,18 +23,12 @@ type Channel string const ( // ChannelStable is the stable channel. ChannelStable = Channel("stable") - // ChannelAlpha is the alpha channel. - ChannelAlpha = Channel("alpha") - // ChannelBeta is the beta channel. - ChannelBeta = Channel("beta") - // ChannelRC is the rc channel. - ChannelRC = Channel("rc") + // ChannelCustom is the custom channel where git hash is used in the versioning. + ChannelCustom = Channel("custom") ) // IsValid returns true if the release channel is valid. func (c Channel) IsValid() bool { return c == ChannelStable || - c == ChannelAlpha || - c == ChannelBeta || - c == ChannelRC + c == ChannelCustom } diff --git a/pkg/version/version.go b/pkg/version/version.go index 69175d105..ec77f7cf8 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -34,27 +34,28 @@ import ( type Version struct { Major int Channel Channel - Patch int + Patch string } -// New returns a Version struct from a version string -// Sample allowed inputs: "v1-alpha-1", "v1", "v1-alpha-0" -// Sample disallowed inputs: "v1-alpha", "v1-alpha-1.0", "v1-alpha-1.0.0", "v1-alpha.", "v1.0-alpha.1". -func New(version string) (Version, error) { - var major, patch int - var err error +// ParseVersionString returns a Version struct from a version string like - +// "v1", "v1-alpha-1", "v1-beta-3", etc. 
+func ParseVersionString(version string) (Version, error) { + var ( + major int + patch string + err error + ) channel := ChannelStable - re := regexp.MustCompile(`^v\d+(-\b\w+\b-\d+)?$`) - match := re.FindStringSubmatch(version) - - if len(match) == 0 { + re := regexp.MustCompile(`^v\d+(-\b\w+\b\-\w+)?$`) + match := re.MatchString(version) + if !match { return Version{}, fmt.Errorf("invalid version string %s", version) } // match[0] is the entire string e.g "v1-alpha-1" or "v1" // split match[0] with "-" as the delimiter - ver := strings.Split(match[0], "-") + ver := strings.Split(version, "-") // ver[0] is the major version // trim the "v" prefix and then convert to int @@ -67,9 +68,57 @@ func New(version string) (Version, error) { // ver[2] is the patch if len(ver) == 3 { channel = Channel(ver[1]) - if patch, err = strconv.Atoi(ver[2]); err != nil { - return Version{}, fmt.Errorf("invalid patch value in version %s", ver[2]) + patch = ver[2] + } + clusterStackVersion := Version{ + Major: major, + Channel: channel, + Patch: patch, + } + if err := clusterStackVersion.Validate(); err != nil { + return Version{}, err + } + + return clusterStackVersion, nil +} + +// New returns a Version struct from a version string +// Sample allowed inputs: "v1-alpha.1", "v1", "v1-alpha.0" +// Sample disallowed inputs: "v1-alpha", "v1-alpha-1.0", "v1-alpha-1.0.0", "v1-alpha.", "v1.0-alpha.1". 
+func New(version string) (Version, error) { + var ( + major int + patch string + err error + ) + channel := ChannelStable + + re := regexp.MustCompile(`^v\d+(-\b\w+\b\.\w+)?$`) + match := re.MatchString(version) + if !match { + return Version{}, fmt.Errorf("invalid version string %s", version) + } + + // match[0] is the entire string e.g "v1-alpha.1" or "v1" + // split match[0] with "-" as the delimiter + ver := strings.Split(version, "-") + + // ver[0] is the major version + // trim the "v" prefix and then convert to int + if major, err = strconv.Atoi(strings.TrimPrefix(ver[0], "v")); err != nil { + return Version{}, fmt.Errorf("invalid major version %s", ver[0]) + } + + // If the length of ver is 2, then the version string is of the form "v1-alpha.1", and split it - + // ver[0] is the channel + // ver[1] is the patch + if len(ver) == 2 { + splittedChannelPatch := strings.Split(ver[1], ".") + if len(splittedChannelPatch) != 2 { + return Version{}, fmt.Errorf("invalid version string %s", version) } + channel = Channel(splittedChannelPatch[0]) + patch = splittedChannelPatch[1] } clusterStackVersion := Version{ @@ -86,13 +135,15 @@ func New(version string) (Version, error) { // FromReleaseTag returns a Version struct from a release tag string. func FromReleaseTag(releaseTag string) (Version, error) { v := strings.Split(releaseTag, "-") - if len(v) != 5 && len(v) != 6 { + if len(v) != 5 && len(v) != 7 { return Version{}, fmt.Errorf("invalid release tag %s", releaseTag) } + // for docker-ferrol-1-26-v1 type tag, v[4] is the version if len(v) == 5 { - return New(v[4]) + return ParseVersionString(v[4]) } - return New(fmt.Sprintf("%s-%s", v[4], v[5])) + // for docker-ferrol-1-26-v1-alpha-0 type tag, v[4] is the version and v[5] is the release channel + patch version + return ParseVersionString(fmt.Sprintf("%s-%s-%s", v[4], v[5], v[6])) } // Validate validates the version. 
@@ -100,15 +151,32 @@ func (csv *Version) Validate() error { if csv.Major < 0 { return fmt.Errorf("major version should be a non-negative integer") } - if !csv.Channel.IsValid() { - return fmt.Errorf("invalid channel: %s", csv.Channel) - } - if csv.Patch < 0 { - return fmt.Errorf("patch version should be a non-negative integer") + + if csv.Channel != ChannelStable { + // Check if the patch is a valid integer + if isInteger(csv.Patch) { + // If it's an integer, check if it's greater than 0 + patchInt, _ := strconv.Atoi(csv.Patch) + if patchInt < 0 { + return fmt.Errorf("patch version should be a non-negative integer") + } + } + + // If it's alpha numeric, check if it's empty + if csv.Patch == "" { + return fmt.Errorf("patch can't empty") + } } + return nil } +// isInteger checks if the given string is a valid integer. +func isInteger(s string) bool { + _, err := strconv.Atoi(s) + return err == nil +} + // Compare compares two Version structs // Returns 1 if csv is greater than input // Returns -1 if csv is less than input @@ -138,9 +206,23 @@ func (csv Version) Compare(input Version) (int, error) { return 0, nil } +// String converts a Version struct to a string representation. +// If the channel is stable, it returns the version in the format "vMajor". +// Otherwise, it returns the version in the format "vMajor-Channel-Patch". func (csv Version) String() string { if csv.Channel == ChannelStable { return fmt.Sprintf("v%d", csv.Major) } - return fmt.Sprintf("v%d-%s-%d", csv.Major, csv.Channel, csv.Patch) + return fmt.Sprintf("v%d-%s-%s", csv.Major, csv.Channel, csv.Patch) +} + +// StringWithDot converts a Version struct to a string representation. +// If the channel is stable, it returns the version in the format "vMajor". +// Otherwise, it returns the version in the format "vMajor-Channel.Patch", +// similar to String but with a dot separating channel and patch. 
+func (csv Version) StringWithDot() string { + if csv.Channel == ChannelStable { + return fmt.Sprintf("v%d", csv.Major) + } + return fmt.Sprintf("v%d-%s.%s", csv.Major, csv.Channel, csv.Patch) } diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v1/cluster-addon-values.yaml b/test/releases/cluster-stacks/docker-ferrol-1-27-v1/cluster-addon-values.yaml new file mode 100644 index 000000000..17171abe0 --- /dev/null +++ b/test/releases/cluster-stacks/docker-ferrol-1-27-v1/cluster-addon-values.yaml @@ -0,0 +1,5 @@ +values: | + metrics-server: + commonLabels: + domain: "{{ .Cluster.spec.controlPlaneEndpoint.host }}" + clusterAddonVersion: "v1" diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v1/docker-ferrol-1-27-cluster-addon-v1.tgz b/test/releases/cluster-stacks/docker-ferrol-1-27-v1/docker-ferrol-1-27-cluster-addon-v1.tgz index 8be581f0f..22727489b 100644 Binary files a/test/releases/cluster-stacks/docker-ferrol-1-27-v1/docker-ferrol-1-27-cluster-addon-v1.tgz and b/test/releases/cluster-stacks/docker-ferrol-1-27-v1/docker-ferrol-1-27-cluster-addon-v1.tgz differ diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v1/docker-ferrol-1-27-cluster-class-v1.tgz b/test/releases/cluster-stacks/docker-ferrol-1-27-v1/docker-ferrol-1-27-cluster-class-v1.tgz index b68e08bd0..995c95646 100644 Binary files a/test/releases/cluster-stacks/docker-ferrol-1-27-v1/docker-ferrol-1-27-cluster-class-v1.tgz and b/test/releases/cluster-stacks/docker-ferrol-1-27-v1/docker-ferrol-1-27-cluster-class-v1.tgz differ diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v1/metadata.yaml b/test/releases/cluster-stacks/docker-ferrol-1-27-v1/metadata.yaml index 99af1587b..cdbdc1cfd 100644 --- a/test/releases/cluster-stacks/docker-ferrol-1-27-v1/metadata.yaml +++ b/test/releases/cluster-stacks/docker-ferrol-1-27-v1/metadata.yaml @@ -4,4 +4,4 @@ versions: kubernetes: v1.27.3 components: clusterAddon: v1 - nodeImage: v1 \ No newline at end of file + nodeImage: v1 diff 
--git a/test/releases/cluster-stacks/docker-ferrol-1-27-v1/topology-docker.yaml b/test/releases/cluster-stacks/docker-ferrol-1-27-v1/topology-docker.yaml new file mode 100644 index 000000000..c629f448f --- /dev/null +++ b/test/releases/cluster-stacks/docker-ferrol-1-27-v1/topology-docker.yaml @@ -0,0 +1,26 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" + namespace: "cluster" +spec: + clusterNetwork: + services: + cidrBlocks: ["10.128.0.0/12"] + pods: + cidrBlocks: ["192.168.0.0/16"] + serviceDomain: "cluster.local" + topology: + class: docker-ferrol-1-27-v1 + controlPlane: + metadata: {} + replicas: 1 + variables: + - name: imageRepository + value: "" + version: v1.27.3 + workers: + machineDeployments: + - class: workeramd64 + name: md-0 + replicas: 1 diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v2/cluster-addon-values.yaml b/test/releases/cluster-stacks/docker-ferrol-1-27-v2/cluster-addon-values.yaml new file mode 100644 index 000000000..e0033983a --- /dev/null +++ b/test/releases/cluster-stacks/docker-ferrol-1-27-v2/cluster-addon-values.yaml @@ -0,0 +1,5 @@ +values: | + metrics-server: + commonLabels: + domain: "{{ .Cluster.spec.controlPlaneEndpoint.host }}" + clusterAddonVersion: "v2" diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v2/docker-ferrol-1-27-cluster-addon-v2.tgz b/test/releases/cluster-stacks/docker-ferrol-1-27-v2/docker-ferrol-1-27-cluster-addon-v2.tgz index 8be581f0f..e0feb436d 100644 Binary files a/test/releases/cluster-stacks/docker-ferrol-1-27-v2/docker-ferrol-1-27-cluster-addon-v2.tgz and b/test/releases/cluster-stacks/docker-ferrol-1-27-v2/docker-ferrol-1-27-cluster-addon-v2.tgz differ diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v2/docker-ferrol-1-27-cluster-class-v2.tgz b/test/releases/cluster-stacks/docker-ferrol-1-27-v2/docker-ferrol-1-27-cluster-class-v2.tgz index b68e08bd0..91d82e1d6 100644 Binary files 
a/test/releases/cluster-stacks/docker-ferrol-1-27-v2/docker-ferrol-1-27-cluster-class-v2.tgz and b/test/releases/cluster-stacks/docker-ferrol-1-27-v2/docker-ferrol-1-27-cluster-class-v2.tgz differ diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v2/metadata.yaml b/test/releases/cluster-stacks/docker-ferrol-1-27-v2/metadata.yaml index 99af1587b..47ce43f1a 100644 --- a/test/releases/cluster-stacks/docker-ferrol-1-27-v2/metadata.yaml +++ b/test/releases/cluster-stacks/docker-ferrol-1-27-v2/metadata.yaml @@ -1,7 +1,7 @@ apiVersion: metadata.clusterstack.x-k8s.io/v1alpha1 versions: - clusterStack: v1 + clusterStack: v2 kubernetes: v1.27.3 components: - clusterAddon: v1 - nodeImage: v1 \ No newline at end of file + clusterAddon: v2 + nodeImage: v1 diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v2/topology-docker.yaml b/test/releases/cluster-stacks/docker-ferrol-1-27-v2/topology-docker.yaml new file mode 100644 index 000000000..ec1f970c7 --- /dev/null +++ b/test/releases/cluster-stacks/docker-ferrol-1-27-v2/topology-docker.yaml @@ -0,0 +1,26 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" + namespace: "cluster" +spec: + clusterNetwork: + services: + cidrBlocks: ["10.128.0.0/12"] + pods: + cidrBlocks: ["192.168.0.0/16"] + serviceDomain: "cluster.local" + topology: + class: docker-ferrol-1-27-v2 + controlPlane: + metadata: {} + replicas: 1 + variables: + - name: imageRepository + value: "" + version: v1.27.3 + workers: + machineDeployments: + - class: workeramd64 + name: md-0 + replicas: 1 diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v3/cluster-addon-values.yaml b/test/releases/cluster-stacks/docker-ferrol-1-27-v3/cluster-addon-values.yaml new file mode 100644 index 000000000..e0033983a --- /dev/null +++ b/test/releases/cluster-stacks/docker-ferrol-1-27-v3/cluster-addon-values.yaml @@ -0,0 +1,5 @@ +values: | + metrics-server: + commonLabels: + domain: "{{ 
.Cluster.spec.controlPlaneEndpoint.host }}" + clusterAddonVersion: "v2" diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v3/docker-ferrol-1-27-cluster-addon-v2.tgz b/test/releases/cluster-stacks/docker-ferrol-1-27-v3/docker-ferrol-1-27-cluster-addon-v2.tgz new file mode 100644 index 000000000..5b0933f5b Binary files /dev/null and b/test/releases/cluster-stacks/docker-ferrol-1-27-v3/docker-ferrol-1-27-cluster-addon-v2.tgz differ diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v3/docker-ferrol-1-27-cluster-class-v3.tgz b/test/releases/cluster-stacks/docker-ferrol-1-27-v3/docker-ferrol-1-27-cluster-class-v3.tgz new file mode 100644 index 000000000..209be4e73 Binary files /dev/null and b/test/releases/cluster-stacks/docker-ferrol-1-27-v3/docker-ferrol-1-27-cluster-class-v3.tgz differ diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v3/metadata.yaml b/test/releases/cluster-stacks/docker-ferrol-1-27-v3/metadata.yaml new file mode 100644 index 000000000..58950cca3 --- /dev/null +++ b/test/releases/cluster-stacks/docker-ferrol-1-27-v3/metadata.yaml @@ -0,0 +1,7 @@ +apiVersion: metadata.clusterstack.x-k8s.io/v1alpha1 +versions: + clusterStack: v3 + kubernetes: v1.27.3 + components: + clusterAddon: v2 + nodeImage: v2 diff --git a/test/releases/cluster-stacks/docker-ferrol-1-27-v3/topology-docker.yaml b/test/releases/cluster-stacks/docker-ferrol-1-27-v3/topology-docker.yaml new file mode 100644 index 000000000..f47dd0c27 --- /dev/null +++ b/test/releases/cluster-stacks/docker-ferrol-1-27-v3/topology-docker.yaml @@ -0,0 +1,26 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" + namespace: "cluster" +spec: + clusterNetwork: + services: + cidrBlocks: ["10.128.0.0/12"] + pods: + cidrBlocks: ["192.168.0.0/16"] + serviceDomain: "cluster.local" + topology: + class: docker-ferrol-1-27-v3 + controlPlane: + metadata: {} + replicas: 1 + variables: + - name: imageRepository + value: "" + version: v1.27.3 + 
workers: + machineDeployments: + - class: workeramd64 + name: md-0 + replicas: 1 diff --git a/vendor/github.com/alessio/shellescape/.gitignore b/vendor/github.com/alessio/shellescape/.gitignore new file mode 100644 index 000000000..4ba7c2d13 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +.idea/ + +escargs diff --git a/vendor/github.com/alessio/shellescape/.golangci.yml b/vendor/github.com/alessio/shellescape/.golangci.yml new file mode 100644 index 000000000..cd4a17e44 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/.golangci.yml @@ -0,0 +1,64 @@ +# run: +# # timeout for analysis, e.g. 30s, 5m, default is 1m +# timeout: 5m + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + - goconst + - gocritic + - gofmt + - goimports + - golint + - gosec + - gosimple + - govet + - ineffassign + - interfacer + - maligned + - misspell + - prealloc + - scopelint + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - misspell + - wsl + +issues: + exclude-rules: + - text: "Use of weak random number generator" + linters: + - gosec + - text: "comment on exported var" + linters: + - golint + - text: "don't use an underscore in package name" + linters: + - golint + - text: "ST1003:" + linters: + - stylecheck + # FIXME: Disabled until golangci-lint updates stylecheck with this fix: + # https://github.com/dominikh/go-tools/issues/389 + - text: "ST1016:" + linters: + - stylecheck + +linters-settings: + dogsled: + max-blank-identifiers: 3 + maligned: + # print struct with more effective memory layout or not, false by default + suggest-new: true + +run: + tests: false 
diff --git a/vendor/github.com/alessio/shellescape/.goreleaser.yml b/vendor/github.com/alessio/shellescape/.goreleaser.yml new file mode 100644 index 000000000..064c9374d --- /dev/null +++ b/vendor/github.com/alessio/shellescape/.goreleaser.yml @@ -0,0 +1,33 @@ +# This is an example goreleaser.yaml file with some sane defaults. +# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + # You may remove this if you don't use go modules. + - go mod download + # you may remove this if you don't need go generate + - go generate ./... +builds: + - env: + - CGO_ENABLED=0 + main: ./cmd/escargs + goos: + - linux + - windows + - darwin +archives: + - replacements: + darwin: Darwin + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' diff --git a/vendor/github.com/alessio/shellescape/AUTHORS b/vendor/github.com/alessio/shellescape/AUTHORS new file mode 100644 index 000000000..4a647a6f4 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/AUTHORS @@ -0,0 +1 @@ +Alessio Treglia diff --git a/vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md b/vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..e8eda6062 --- /dev/null +++ b/vendor/github.com/alessio/shellescape/CODE_OF_CONDUCT.md @@ -0,0 +1,76 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at alessio@debian.org. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/vendor/github.com/alessio/shellescape/LICENSE b/vendor/github.com/alessio/shellescape/LICENSE new file mode 100644 index 000000000..9f760679f --- /dev/null +++ b/vendor/github.com/alessio/shellescape/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Alessio Treglia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/alessio/shellescape/README.md b/vendor/github.com/alessio/shellescape/README.md new file mode 100644 index 000000000..910bb253b --- /dev/null +++ b/vendor/github.com/alessio/shellescape/README.md @@ -0,0 +1,61 @@ +![Build](https://github.com/alessio/shellescape/workflows/Build/badge.svg) +[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/alessio/shellescape?tab=overview) +[![sourcegraph](https://sourcegraph.com/github.com/alessio/shellescape/-/badge.svg)](https://sourcegraph.com/github.com/alessio/shellescape) +[![codecov](https://codecov.io/gh/alessio/shellescape/branch/master/graph/badge.svg)](https://codecov.io/gh/alessio/shellescape) +[![Coverage](https://gocover.io/_badge/github.com/alessio/shellescape)](https://gocover.io/github.com/alessio/shellescape) +[![Go Report Card](https://goreportcard.com/badge/github.com/alessio/shellescape)](https://goreportcard.com/report/github.com/alessio/shellescape) + +# shellescape +Escape arbitrary strings for safe use as command line arguments. +## Contents of the package + +This package provides the `shellescape.Quote()` function that returns a +shell-escaped copy of a string. This functionality could be helpful +in those cases where it is known that the output of a Go program will +be appended to/used in the context of shell programs' command line arguments. 
+ +This work was inspired by the Python original package +[shellescape](https://pypi.python.org/pypi/shellescape). + +## Usage + +The following snippet shows a typical unsafe idiom: + +```go +package main + +import ( + "fmt" + "os" +) + +func main() { + fmt.Printf("ls -l %s\n", os.Args[1]) +} +``` +_[See in Go Playground](https://play.golang.org/p/Wj2WoUfH_d)_ + +Especially when creating pipeline of commands which might end up being +executed by a shell interpreter, it is particularly unsafe to not +escape arguments. + +`shellescape.Quote()` comes in handy and to safely escape strings: + +```go +package main + +import ( + "fmt" + "os" + + "gopkg.in/alessio/shellescape.v1" +) + +func main() { + fmt.Printf("ls -l %s\n", shellescape.Quote(os.Args[1])) +} +``` +_[See in Go Playground](https://play.golang.org/p/HJ_CXgSrmp)_ + +## The escargs utility +__escargs__ reads lines from the standard input and prints shell-escaped versions. Unlinke __xargs__, blank lines on the standard input are not discarded. diff --git a/vendor/github.com/alessio/shellescape/shellescape.go b/vendor/github.com/alessio/shellescape/shellescape.go new file mode 100644 index 000000000..dc34a556a --- /dev/null +++ b/vendor/github.com/alessio/shellescape/shellescape.go @@ -0,0 +1,66 @@ +/* +Package shellescape provides the shellescape.Quote to escape arbitrary +strings for a safe use as command line arguments in the most common +POSIX shells. + +The original Python package which this work was inspired by can be found +at https://pypi.python.org/pypi/shellescape. +*/ +package shellescape // "import gopkg.in/alessio/shellescape.v1" + +/* +The functionality provided by shellescape.Quote could be helpful +in those cases where it is known that the output of a Go program will +be appended to/used in the context of shell programs' command line arguments. 
+*/ + +import ( + "regexp" + "strings" + "unicode" +) + +var pattern *regexp.Regexp + +func init() { + pattern = regexp.MustCompile(`[^\w@%+=:,./-]`) +} + +// Quote returns a shell-escaped version of the string s. The returned value +// is a string that can safely be used as one token in a shell command line. +func Quote(s string) string { + if len(s) == 0 { + return "''" + } + + if pattern.MatchString(s) { + return "'" + strings.ReplaceAll(s, "'", "'\"'\"'") + "'" + } + + return s +} + +// QuoteCommand returns a shell-escaped version of the slice of strings. +// The returned value is a string that can safely be used as shell command arguments. +func QuoteCommand(args []string) string { + l := make([]string, len(args)) + + for i, s := range args { + l[i] = Quote(s) + } + + return strings.Join(l, " ") +} + +// StripUnsafe remove non-printable runes, e.g. control characters in +// a string that is meant for consumption by terminals that support +// control characters. +func StripUnsafe(s string) string { + return strings.Map(func(r rune) rune { + if unicode.IsPrint(r) { + return r + } + + return -1 + }, s) +} diff --git a/vendor/github.com/google/safetext/LICENSE b/vendor/github.com/google/safetext/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/google/safetext/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/safetext/common/common.go b/vendor/github.com/google/safetext/common/common.go new file mode 100644 index 000000000..80a8bbd97 --- /dev/null +++ b/vendor/github.com/google/safetext/common/common.go @@ -0,0 +1,260 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package common implements common functionality for dealing with text/template. +package common + +import ( + "bytes" + "reflect" + "strings" + "sync" + "text/template" + "text/template/parse" + "unicode" + "unicode/utf8" +) + +// ContainsStringsWithSpecialCharacters determines whether an object contains interface{} strings that contain special characters. 
+func ContainsStringsWithSpecialCharacters(data interface{}, special string) bool { + if data == nil { + return false + } + + switch reflect.TypeOf(data).Kind() { + case reflect.Ptr: + p := reflect.ValueOf(data) + return !p.IsNil() && ContainsStringsWithSpecialCharacters(p.Elem().Interface(), special) + case reflect.String: + return strings.ContainsAny(reflect.ValueOf(data).String(), special) + case reflect.Slice, reflect.Array: + for i := 0; i < reflect.ValueOf(data).Len(); i++ { + if ContainsStringsWithSpecialCharacters(reflect.ValueOf(data).Index(i).Interface(), special) { + return true + } + } + case reflect.Map: + dataIter := reflect.ValueOf(data).MapRange() + for dataIter.Next() { + if ContainsStringsWithSpecialCharacters(dataIter.Value().Interface(), special) { + return true + } + } + case reflect.Struct: + t := reflect.TypeOf(data) + v := reflect.ValueOf(data) + n := v.NumField() + for i := 0; i < n; i++ { + r, _ := utf8.DecodeRuneInString(t.Field(i).Name) + if unicode.IsUpper(r) && ContainsStringsWithSpecialCharacters(v.Field(i).Interface(), special) { + return true + } + } + } + + return false +} + +// FuncMap to register new template objects with. +var FuncMap = map[string]interface{}{ + "textTemplateRemediationFunc": textTemplateRemediationFunc, + "StructuralData": echo, +} + +func echo(in interface{}) interface{} { + return in +} + +// BaselineString is a string callback function that just returns a constant string, +// used to get a baseline of how the resultant YAML is structured. +func BaselineString(string) string { + return "baseline" +} + +// stringCallback provides the callback for how strings should be manipulated before +// being pasted into the template execution result. 
+var stringCallback func(string) string +var stringCallbackLock sync.Mutex + +func textTemplateRemediationFunc(data interface{}) interface{} { + return deepCopyMutateStrings(data, stringCallback) +} + +// ExecuteWithCallback performs an execution on a callback-applied template +// (WalkApplyFuncToNonDeclaractiveActions) with a specified callback. +func ExecuteWithCallback(tmpl *template.Template, cb func(string) string, result *bytes.Buffer, data interface{}) error { + stringCallbackLock.Lock() + defer stringCallbackLock.Unlock() + stringCallback = cb + + return tmpl.Execute(result, data) +} + +func makePointer(data interface{}) interface{} { + rtype := reflect.New(reflect.TypeOf(data)) + rtype.Elem().Set(reflect.ValueOf(data)) + return rtype.Interface() +} + +func dereference(data interface{}) interface{} { + return reflect.ValueOf(data).Elem().Interface() +} + +func deepCopyMutateStrings(data interface{}, mutateF func(string) string) interface{} { + var r interface{} + + if data == nil { + return nil + } + + switch reflect.TypeOf(data).Kind() { + case reflect.Ptr: + p := reflect.ValueOf(data) + if p.IsNil() { + r = data + } else { + c := deepCopyMutateStrings(dereference(data), mutateF) + r = makePointer(c) + + // Sometimes we accidentally introduce one too minterface{} layers of indirection (seems related to protobuf generated fields like ReleaseNamespace *ReleaseNamespace `... 
reflect:"unexport"`) + if reflect.TypeOf(r) != reflect.TypeOf(data) { + r = c + } + } + case reflect.String: + return mutateF(reflect.ValueOf(data).String()) + case reflect.Slice, reflect.Array: + rc := reflect.MakeSlice(reflect.TypeOf(data), reflect.ValueOf(data).Len(), reflect.ValueOf(data).Len()) + for i := 0; i < reflect.ValueOf(data).Len(); i++ { + rc.Index(i).Set(reflect.ValueOf(deepCopyMutateStrings(reflect.ValueOf(data).Index(i).Interface(), mutateF))) + } + r = rc.Interface() + case reflect.Map: + rc := reflect.MakeMap(reflect.TypeOf(data)) + dataIter := reflect.ValueOf(data).MapRange() + for dataIter.Next() { + rc.SetMapIndex(dataIter.Key(), reflect.ValueOf(deepCopyMutateStrings(dataIter.Value().Interface(), mutateF))) + } + r = rc.Interface() + case reflect.Struct: + s := reflect.New(reflect.TypeOf(data)) + + t := reflect.TypeOf(data) + v := reflect.ValueOf(data) + n := v.NumField() + for i := 0; i < n; i++ { + r, _ := utf8.DecodeRuneInString(t.Field(i).Name) + + // Don't copy unexported fields + if unicode.IsUpper(r) { + reflect.Indirect(s).Field(i).Set( + reflect.ValueOf(deepCopyMutateStrings(v.Field(i).Interface(), mutateF)), + ) + } + } + + r = s.Interface() + default: + // No other types need special handling (int, bool, etc) + r = data + } + + return r +} + +func applyPipeCmds(cmds []*parse.CommandNode) { + applyFunc := "textTemplateRemediationFunc" + + for _, c := range cmds { + newArgs := make([]parse.Node, 0) + for i, a := range c.Args { + switch a := a.(type) { + case *parse.DotNode, *parse.FieldNode, *parse.VariableNode: + if i == 0 && len(c.Args) > 1 { + // If this is the first "argument" of multiple, then it is really a function + newArgs = append(newArgs, a) + } else { + // If this node is an argument to a call to "StructuralData", then pass it through as-is + switch identifier := c.Args[0].(type) { + case *parse.IdentifierNode: + if identifier.Ident == "StructuralData" { + newArgs = append(newArgs, a) + continue + } + } + + newPipe := 
&parse.PipeNode{NodeType: parse.NodePipe, Decl: nil} + newPipe.Cmds = []*parse.CommandNode{ + &parse.CommandNode{NodeType: parse.NodeCommand, Args: []parse.Node{a}}, + &parse.CommandNode{NodeType: parse.NodeCommand, Args: []parse.Node{&parse.IdentifierNode{NodeType: parse.NodeIdentifier, Ident: applyFunc}}}, + } + newArgs = append(newArgs, newPipe) + } + case *parse.PipeNode: + applyPipeCmds(a.Cmds) + newArgs = append(newArgs, a) + default: + newArgs = append(newArgs, a) + } + } + + c.Args = newArgs + } +} + +func branchNode(node parse.Node) *parse.BranchNode { + switch node := node.(type) { + case *parse.IfNode: + return &node.BranchNode + case *parse.RangeNode: + return &node.BranchNode + case *parse.WithNode: + return &node.BranchNode + } + + return nil +} + +// WalkApplyFuncToNonDeclaractiveActions walks the AST, applying a pipeline function to interface{} "paste" nodes (non-declarative action nodes) +func WalkApplyFuncToNonDeclaractiveActions(template *template.Template, node parse.Node) { + switch node := node.(type) { + case *parse.ActionNode: + // Non-declarative actions are paste actions + if len(node.Pipe.Decl) == 0 { + applyPipeCmds(node.Pipe.Cmds) + } + + case *parse.IfNode, *parse.RangeNode, *parse.WithNode: + nodeBranch := branchNode(node) + WalkApplyFuncToNonDeclaractiveActions(template, nodeBranch.List) + if nodeBranch.ElseList != nil { + WalkApplyFuncToNonDeclaractiveActions(template, nodeBranch.ElseList) + } + case *parse.ListNode: + for _, node := range node.Nodes { + WalkApplyFuncToNonDeclaractiveActions(template, node) + } + case *parse.TemplateNode: + tmpl := template.Lookup(node.Name) + if tmpl != nil { + treeCopy := tmpl.Tree.Copy() + WalkApplyFuncToNonDeclaractiveActions(tmpl, treeCopy.Root) + template.AddParseTree(node.Name, treeCopy) + } + } +} diff --git a/vendor/github.com/google/safetext/yamltemplate/yamltemplate.go b/vendor/github.com/google/safetext/yamltemplate/yamltemplate.go new file mode 100644 index 000000000..47d8f2838 --- 
/dev/null +++ b/vendor/github.com/google/safetext/yamltemplate/yamltemplate.go @@ -0,0 +1,657 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package yamltemplate is a drop-in-replacement for using text/template to produce YAML, that adds automatic detection for YAML injection +package yamltemplate + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path" + "path/filepath" + "reflect" + "text/template" + "text/template/parse" + "unicode" + "unicode/utf8" + + "gopkg.in/yaml.v3" + + "github.com/google/safetext/common" +) + +// ErrInvalidYAMLTemplate indicates the requested template is not valid YAML. +var ErrInvalidYAMLTemplate error = errors.New("Invalid YAML Template") + +// ErrYAMLInjection indicates the inputs resulted in YAML injection. +var ErrYAMLInjection error = errors.New("YAML Injection Detected") + +// ExecError is the custom error type returned when Execute has an +// error evaluating its template. (If a write error occurs, the actual +// error is returned; it will not be of type ExecError.) +type ExecError = template.ExecError + +// FuncMap is the type of the map defining the mapping from names to functions. +// Each function must have either a single return value, or two return values of +// which the second has type error. In that case, if the second (error) +// return value evaluates to non-nil during execution, execution terminates and +// Execute returns that error. 
+// +// Errors returned by Execute wrap the underlying error; call errors.As to +// uncover them. +// +// When template execution invokes a function with an argument list, that list +// must be assignable to the function's parameter types. Functions meant to +// apply to arguments of arbitrary type can use parameters of type interface{} or +// of type reflect.Value. Similarly, functions meant to return a result of arbitrary +// type can return interface{} or reflect.Value. +type FuncMap = template.FuncMap + +// Template is the representation of a parsed template. The *parse.Tree +// field is exported only for use by html/template and should be treated +// as unexported by all other clients. +type Template struct { + unsafeTemplate *template.Template +} + +// New allocates a new, undefined template with the given name. +func New(name string) *Template { + return &Template{unsafeTemplate: template.New(name).Funcs(common.FuncMap)} +} + +const yamlSpecialCharacters = "{}[]&*#?|-.<>=!%@:\"'`,\r\n" + +func mapOrArray(in interface{}) bool { + return in != nil && (reflect.TypeOf(in).Kind() == reflect.Map || reflect.TypeOf(in).Kind() == reflect.Slice || reflect.TypeOf(in).Kind() == reflect.Array) +} + +func allKeysMatch(base interface{}, a interface{}, b interface{}) bool { + if base == nil { + return a == nil && b == nil + } + + switch reflect.TypeOf(base).Kind() { + case reflect.Ptr: + if reflect.TypeOf(a).Kind() != reflect.Ptr || reflect.TypeOf(b).Kind() != reflect.Ptr { + return false + } + + if !allKeysMatch(reflect.ValueOf(base).Elem().Interface(), reflect.ValueOf(a).Elem().Interface(), reflect.ValueOf(b).Elem().Interface()) { + return true + } + case reflect.Map: + if reflect.TypeOf(a).Kind() != reflect.Map || reflect.TypeOf(b).Kind() != reflect.Map { + return false + } + + if reflect.ValueOf(a).Len() != reflect.ValueOf(base).Len() || reflect.ValueOf(b).Len() != reflect.ValueOf(base).Len() { + return false + } + + basei := reflect.ValueOf(base).MapRange() + for 
basei.Next() { + av := reflect.ValueOf(a).MapIndex(basei.Key()) + bv := reflect.ValueOf(b).MapIndex(basei.Key()) + if !av.IsValid() || !bv.IsValid() || + !allKeysMatch(basei.Value().Interface(), av.Interface(), bv.Interface()) { + return false + } + } + case reflect.Slice, reflect.Array: + if reflect.TypeOf(a).Kind() != reflect.Slice && reflect.TypeOf(a).Kind() != reflect.Array && + reflect.TypeOf(b).Kind() != reflect.Slice && reflect.TypeOf(b).Kind() != reflect.Array { + return false + } + + if reflect.ValueOf(a).Len() != reflect.ValueOf(base).Len() || reflect.ValueOf(b).Len() != reflect.ValueOf(base).Len() { + return false + } + + for i := 0; i < reflect.ValueOf(base).Len(); i++ { + if !allKeysMatch(reflect.ValueOf(base).Index(i).Interface(), reflect.ValueOf(a).Index(i).Interface(), reflect.ValueOf(b).Index(i).Interface()) { + return false + } + } + case reflect.Struct: + n := reflect.ValueOf(base).NumField() + for i := 0; i < n; i++ { + baseit := reflect.TypeOf(base).Field(i) + ait := reflect.TypeOf(a).Field(i) + bit := reflect.TypeOf(b).Field(i) + + if baseit.Name != ait.Name || baseit.Name != bit.Name { + return false + } + + // Only compare public members (private members cannot be overwritten by text/template) + decodedName, _ := utf8.DecodeRuneInString(baseit.Name) + if unicode.IsUpper(decodedName) { + basei := reflect.ValueOf(base).Field(i) + ai := reflect.ValueOf(a).Field(i) + bi := reflect.ValueOf(b).Field(i) + + if !allKeysMatch(basei.Interface(), ai.Interface(), bi.Interface()) { + return false + } + } + } + case reflect.String: + // Baseline type of string was chosen arbitrarily, so just check that there isn't a new a map or slice/array injected, which would change the structure of the YAML + if mapOrArray(a) || mapOrArray(b) { + return false + } + default: + if reflect.TypeOf(a) != reflect.TypeOf(base) || reflect.TypeOf(b) != reflect.TypeOf(base) { + return false + } + } + + return true +} + +func unmarshalYaml(data []byte) ([]interface{}, error) { + 
r := make([]interface{}, 0) + + decoder := yaml.NewDecoder(bytes.NewReader(data)) + for { + var t interface{} + err := decoder.Decode(&t) + + if err == io.EOF { + break + } else if err == nil { + r = append(r, t) + } else { + return nil, err + } + } + + return r, nil +} + +// Mutation algorithm +func mutateString(s string) string { + // Longest possible output string is 2x the original + out := make([]rune, len(s)*2) + + i := 0 + for _, r := range s { + out[i] = r + i++ + + // Don't repeat quoting-related characters so as to not allow YAML context change in the mutation result + if r != '\\' && r != '\'' && r != '"' { + out[i] = r + i++ + } + } + + return string(out[:i]) +} + +// Execute applies a parsed template to the specified data object, +// and writes the output to wr. +// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel, although if parallel +// executions share a Writer the output may be interleaved. +// +// If data is a reflect.Value, the template applies to the concrete +// value that the reflect.Value holds, as in fmt.Print. 
+func (t *Template) Execute(wr io.Writer, data interface{}) (err error) { + if data == nil { + return t.unsafeTemplate.Execute(wr, data) + } + + // An attacker may be able to cause type confusion or nil dereference panic during allKeysMatch + defer func() { + if r := recover(); r != nil { + err = ErrYAMLInjection + } + }() + + // Calculate requested result first + var requestedResult bytes.Buffer + + if err := t.unsafeTemplate.Execute(&requestedResult, data); err != nil { + return err + } + + // Fast path for if there are no YAML special characters in the input strings + if !common.ContainsStringsWithSpecialCharacters(data, yamlSpecialCharacters) { + // Note: We assume the result was valid YAML, and don't check for ErrInvalidYAMLTemplate + requestedResult.WriteTo(wr) + return nil + } + + walked, err := t.unsafeTemplate.Clone() + if err != nil { + return err + } + walked.Tree = walked.Tree.Copy() + + common.WalkApplyFuncToNonDeclaractiveActions(walked, walked.Tree.Root) + + // Get baseline + var baselineResult bytes.Buffer + if err = common.ExecuteWithCallback(walked, common.BaselineString, &baselineResult, data); err != nil { + return err + } + + parsedBaselineResult, err := unmarshalYaml(baselineResult.Bytes()) + if err != nil { + return ErrInvalidYAMLTemplate + } + + // If baseline was valid, request must also be valid YAML for no injection to have occurred + parsedRequestedResult, err := unmarshalYaml(requestedResult.Bytes()) + if err != nil { + return ErrYAMLInjection + } + + // Mutate the input + var mutatedResult bytes.Buffer + if err = common.ExecuteWithCallback(walked, mutateString, &mutatedResult, data); err != nil { + return err + } + + parsedMutatedResult, err := unmarshalYaml(mutatedResult.Bytes()) + if err != nil { + return ErrYAMLInjection + } + + // Compare results + if !allKeysMatch(parsedBaselineResult, parsedRequestedResult, parsedMutatedResult) { + return ErrYAMLInjection + } + + requestedResult.WriteTo(wr) + return nil +} + +// Name returns the 
name of the template. +func (t *Template) Name() string { + return t.unsafeTemplate.Name() +} + +// New allocates a new, undefined template associated with the given one and with the same +// delimiters. The association, which is transitive, allows one template to +// invoke another with a {{template}} action. +// +// Because associated templates share underlying data, template construction +// cannot be done safely in parallel. Once the templates are constructed, they +// can be executed in parallel. +func (t *Template) New(name string) *Template { + return &Template{unsafeTemplate: t.unsafeTemplate.New(name).Funcs(common.FuncMap)} +} + +// Clone returns a duplicate of the template, including all associated +// templates. The actual representation is not copied, but the name space of +// associated templates is, so further calls to Parse in the copy will add +// templates to the copy but not to the original. Clone can be used to prepare +// common templates and use them with variant definitions for other templates +// by adding the variants after the clone is made. +func (t *Template) Clone() (*Template, error) { + nt, err := t.unsafeTemplate.Clone() + return &Template{unsafeTemplate: nt}, err +} + +// AddParseTree associates the argument parse tree with the template t, giving +// it the specified name. If the template has not been defined, this tree becomes +// its definition. If it has been defined and already has that name, the existing +// definition is replaced; otherwise a new template is created, defined, and returned. +func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { + nt, err := t.unsafeTemplate.AddParseTree(name, tree) + + if nt != t.unsafeTemplate { + return &Template{unsafeTemplate: nt}, err + } + return t, err +} + +// Option sets options for the template. Options are described by +// strings, either a simple string or "key=value". There can be at +// most one equals sign in an option string. 
If the option string +// is unrecognized or otherwise invalid, Option panics. +// +// Known options: +// +// missingkey: Control the behavior during execution if a map is +// indexed with a key that is not present in the map. +// +// "missingkey=default" or "missingkey=invalid" +// The default behavior: Do nothing and continue execution. +// If printed, the result of the index operation is the string +// "". +// "missingkey=zero" +// The operation returns the zero value for the map type's element. +// "missingkey=error" +// Execution stops immediately with an error. +func (t *Template) Option(opt ...string) *Template { + for _, s := range opt { + t.unsafeTemplate.Option(s) + } + return t +} + +// Templates returns a slice of defined templates associated with t. +func (t *Template) Templates() []*Template { + s := t.unsafeTemplate.Templates() + + var ns []*Template + for _, nt := range s { + ns = append(ns, &Template{unsafeTemplate: nt}) + } + + return ns +} + +// ExecuteTemplate applies the template associated with t that has the given name +// to the specified data object and writes the output to wr. +// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel, although if parallel +// executions share a Writer the output may be interleaved. +func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { + tmpl := t.Lookup(name) + if tmpl == nil { + return fmt.Errorf("template: no template %q associated with template %q", name, t.Name()) + } + return tmpl.Execute(wr, data) +} + +// Delims sets the action delimiters to the specified strings, to be used in +// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template +// definitions will inherit the settings. An empty delimiter stands for the +// corresponding default: {{ or }}. 
+// The return value is the template, so calls can be chained. +func (t *Template) Delims(left, right string) *Template { + t.unsafeTemplate.Delims(left, right) + return t +} + +// DefinedTemplates returns a string listing the defined templates, +// prefixed by the string "; defined templates are: ". If there are none, +// it returns the empty string. For generating an error message here +// and in html/template. +func (t *Template) DefinedTemplates() string { + return t.unsafeTemplate.DefinedTemplates() +} + +// Funcs adds the elements of the argument map to the template's function map. +// It must be called before the template is parsed. +// It panics if a value in the map is not a function with appropriate return +// type or if the name cannot be used syntactically as a function in a template. +// It is legal to overwrite elements of the map. The return value is the template, +// so calls can be chained. +func (t *Template) Funcs(funcMap FuncMap) *Template { + t.unsafeTemplate.Funcs(funcMap) + return t +} + +// Lookup returns the template with the given name that is associated with t. +// It returns nil if there is no such template or the template has no definition. +func (t *Template) Lookup(name string) *Template { + nt := t.unsafeTemplate.Lookup(name) + + if nt == nil { + return nil + } + + if nt != t.unsafeTemplate { + return &Template{unsafeTemplate: nt} + } + + return t +} + +// Parse parses text as a template body for t. +// Named template definitions ({{define ...}} or {{block ...}} statements) in text +// define additional templates associated with t and are removed from the +// definition of t itself. +// +// Templates can be redefined in successive calls to Parse. +// A template definition with a body containing only white space and comments +// is considered empty and will not replace an existing template's body. +// This allows using Parse to add new named template definitions without +// overwriting the main template body. 
+func (t *Template) Parse(text string) (*Template, error) { + nt, err := t.unsafeTemplate.Parse(text) + + if nt != t.unsafeTemplate { + return &Template{unsafeTemplate: nt}, err + } + + return t, err +} + +// Must is a helper that wraps a call to a function returning (*Template, error) +// and panics if the error is non-nil. It is intended for use in variable +// initializations such as +// +// var t = template.Must(template.New("name").Parse("text")) +func Must(t *Template, err error) *Template { + if err != nil { + panic(err) + } + return t +} + +func readFileOS(file string) (name string, b []byte, err error) { + name = filepath.Base(file) + b, err = os.ReadFile(file) + return +} + +func readFileFS(fsys fs.FS) func(string) (string, []byte, error) { + return func(file string) (name string, b []byte, err error) { + name = path.Base(file) + b, err = fs.ReadFile(fsys, file) + return + } +} + +func parseFiles(t *Template, readFile func(string) (string, []byte, error), filenames ...string) (*Template, error) { + if len(filenames) == 0 { + // Not really a problem, but be consistent. + return nil, fmt.Errorf("template: no files named in call to ParseFiles") + } + for _, filename := range filenames { + name, b, err := readFile(filename) + if err != nil { + return nil, err + } + s := string(b) + // First template becomes return value if not already defined, + // and we use that one for subsequent New calls to associate + // all the templates together. Also, if this file has the same name + // as t, this file becomes the contents of t, so + // t, err := New(name).Funcs(xxx).ParseFiles(name) + // works. Otherwise we create a new template associated with t. + var tmpl *Template + if t == nil { + t = New(name) + } + if name == t.Name() { + tmpl = t + } else { + tmpl = t.New(name) + } + _, err = tmpl.Parse(s) + if err != nil { + return nil, err + } + } + return t, nil +} + +// parseGlob is the implementation of the function and method ParseGlob. 
+func parseGlob(t *Template, pattern string) (*Template, error) { + filenames, err := filepath.Glob(pattern) + if err != nil { + return nil, err + } + if len(filenames) == 0 { + return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) + } + return parseFiles(t, readFileOS, filenames...) +} + +func parseFS(t *Template, fsys fs.FS, patterns []string) (*Template, error) { + var filenames []string + for _, pattern := range patterns { + list, err := fs.Glob(fsys, pattern) + if err != nil { + return nil, err + } + if len(list) == 0 { + return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) + } + filenames = append(filenames, list...) + } + return parseFiles(t, readFileFS(fsys), filenames...) +} + +// ParseFiles creates a new Template and parses the template definitions from +// the named files. The returned template's name will have the base name and +// parsed contents of the first file. There must be at least one file. +// If an error occurs, parsing stops and the returned *Template is nil. +// +// When parsing multiple files with the same name in different directories, +// the last one mentioned will be the one that results. +// For instance, ParseFiles("a/foo", "b/foo") stores "b/foo" as the template +// named "foo", while "a/foo" is unavailable. +func ParseFiles(filenames ...string) (*Template, error) { + return parseFiles(nil, readFileOS, filenames...) +} + +// ParseFiles parses the named files and associates the resulting templates with +// t. If an error occurs, parsing stops and the returned template is nil; +// otherwise it is t. There must be at least one file. +// Since the templates created by ParseFiles are named by the base +// names of the argument files, t should usually have the name of one +// of the (base) names of the files. If it does not, depending on t's +// contents before calling ParseFiles, t.Execute may fail. In that +// case use t.ExecuteTemplate to execute a valid template. 
+// +// When parsing multiple files with the same name in different directories, +// the last one mentioned will be the one that results. +func (t *Template) ParseFiles(filenames ...string) (*Template, error) { + // Ensure template is inited + t.Option() + + return parseFiles(t, readFileOS, filenames...) +} + +// ParseGlob creates a new Template and parses the template definitions from +// the files identified by the pattern. The files are matched according to the +// semantics of filepath.Match, and the pattern must match at least one file. +// The returned template will have the (base) name and (parsed) contents of the +// first file matched by the pattern. ParseGlob is equivalent to calling +// ParseFiles with the list of files matched by the pattern. +// +// When parsing multiple files with the same name in different directories, +// the last one mentioned will be the one that results. +func ParseGlob(pattern string) (*Template, error) { + return parseGlob(nil, pattern) +} + +// ParseGlob parses the template definitions in the files identified by the +// pattern and associates the resulting templates with t. The files are matched +// according to the semantics of filepath.Match, and the pattern must match at +// least one file. ParseGlob is equivalent to calling t.ParseFiles with the +// list of files matched by the pattern. +// +// When parsing multiple files with the same name in different directories, +// the last one mentioned will be the one that results. +func (t *Template) ParseGlob(pattern string) (*Template, error) { + // Ensure template is inited + t.Option() + + return parseGlob(t, pattern) +} + +// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys +// instead of the host operating system's file system. +// It accepts a list of glob patterns. +// (Note that most file names serve as glob patterns matching only themselves.) 
+func ParseFS(fsys fs.FS, patterns ...string) (*Template, error) { + return parseFS(nil, fsys, patterns) +} + +// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys +// instead of the host operating system's file system. +// It accepts a list of glob patterns. +// (Note that most file names serve as glob patterns matching only themselves.) +func (t *Template) ParseFS(fsys fs.FS, patterns ...string) (*Template, error) { + // Ensure template is inited + t.Option() + + return parseFS(t, fsys, patterns) +} + +// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b. +func HTMLEscape(w io.Writer, b []byte) { + template.HTMLEscape(w, b) +} + +// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s. +func HTMLEscapeString(s string) string { + return template.HTMLEscapeString(s) +} + +// HTMLEscaper returns the escaped HTML equivalent of the textual +// representation of its arguments. +func HTMLEscaper(args ...interface{}) string { + return template.HTMLEscaper(args) +} + +// IsTrue reports whether the value is 'true', in the sense of not the zero of its type, +// and whether the value has a meaningful truth value. This is the definition of +// truth used by if and other such actions. +func IsTrue(val interface{}) (truth, ok bool) { + return template.IsTrue(val) +} + +// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b. +func JSEscape(w io.Writer, b []byte) { + template.JSEscape(w, b) +} + +// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s. +func JSEscapeString(s string) string { + return template.JSEscapeString(s) +} + +// JSEscaper returns the escaped JavaScript equivalent of the textual +// representation of its arguments. 
+func JSEscaper(args ...interface{}) string { + return template.JSEscaper(args) +} + +// URLQueryEscaper returns the escaped value of the textual representation of +// its arguments in a form suitable for embedding in a URL query. +func URLQueryEscaper(args ...interface{}) string { + return template.URLQueryEscaper(args) +} diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore new file mode 100644 index 000000000..7b5883475 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/.dockerignore @@ -0,0 +1,2 @@ +cmd/tomll/tomll +cmd/tomljson/tomljson diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore new file mode 100644 index 000000000..e6ba63a5c --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/.gitignore @@ -0,0 +1,5 @@ +test_program/test_program_bin +fuzz/ +cmd/tomll/tomll +cmd/tomljson/tomljson +cmd/tomltestgen/tomltestgen diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md new file mode 100644 index 000000000..98b9893d3 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md @@ -0,0 +1,132 @@ +## Contributing + +Thank you for your interest in go-toml! We appreciate you considering +contributing to go-toml! + +The main goal is the project is to provide an easy-to-use TOML +implementation for Go that gets the job done and gets out of your way – +dealing with TOML is probably not the central piece of your project. + +As the single maintainer of go-toml, time is scarce. All help, big or +small, is more than welcomed! + +### Ask questions + +Any question you may have, somebody else might have it too. Always feel +free to ask them on the [issues tracker][issues-tracker]. We will try to +answer them as clearly and quickly as possible, time permitting. 
+ +Asking questions also helps us identify areas where the documentation needs +improvement, or new features that weren't envisioned before. Sometimes, a +seemingly innocent question leads to the fix of a bug. Don't hesitate and +ask away! + +### Improve the documentation + +The best way to share your knowledge and experience with go-toml is to +improve the documentation. Fix a typo, clarify an interface, add an +example, anything goes! + +The documentation is present in the [README][readme] and thorough the +source code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a +change to the documentation, create a pull request with your proposed +changes. For simple changes like that, the easiest way to go is probably +the "Fork this project and edit the file" button on Github, displayed at +the top right of the file. Unless it's a trivial change (for example a +typo), provide a little bit of context in your pull request description or +commit message. + +### Report a bug + +Found a bug! Sorry to hear that :(. Help us and other track them down and +fix by reporting it. [File a new bug report][bug-report] on the [issues +tracker][issues-tracker]. The template should provide enough guidance on +what to include. When in doubt: add more details! By reducing ambiguity and +providing more information, it decreases back and forth and saves everyone +time. + +### Code changes + +Want to contribute a patch? Very happy to hear that! + +First, some high-level rules: + +* A short proposal with some POC code is better than a lengthy piece of + text with no code. Code speaks louder than words. +* No backward-incompatible patch will be accepted unless discussed. + Sometimes it's hard, and Go's lack of versioning by default does not + help, but we try not to break people's programs unless we absolutely have + to. +* If you are writing a new feature or extending an existing one, make sure + to write some documentation. 
+* Bug fixes need to be accompanied with regression tests. +* New code needs to be tested. +* Your commit messages need to explain why the change is needed, even if + already included in the PR description. + +It does sound like a lot, but those best practices are here to save time +overall and continuously improve the quality of the project, which is +something everyone benefits from. + +#### Get started + +The fairly standard code contribution process looks like that: + +1. [Fork the project][fork]. +2. Make your changes, commit on any branch you like. +3. [Open up a pull request][pull-request] +4. Review, potential ask for changes. +5. Merge. You're in! + +Feel free to ask for help! You can create draft pull requests to gather +some early feedback! + +#### Run the tests + +You can run tests for go-toml using Go's test tool: `go test ./...`. +When creating a pull requests, all tests will be ran on Linux on a few Go +versions (Travis CI), and on Windows using the latest Go version +(AppVeyor). + +#### Style + +Try to look around and follow the same format and structure as the rest of +the code. We enforce using `go fmt` on the whole code base. + +--- + +### Maintainers-only + +#### Merge pull request + +Checklist: + +* Passing CI. +* Does not introduce backward-incompatible changes (unless discussed). +* Has relevant doc changes. +* Has relevant unit tests. + +1. Merge using "squash and merge". +2. Make sure to edit the commit message to keep all the useful information + nice and clean. +3. Make sure the commit title is clear and contains the PR number (#123). + +#### New release + +1. Go to [releases][releases]. Click on "X commits to master since this + release". +2. Make note of all the changes. Look for backward incompatible changes, + new features, and bug fixes. +3. Pick the new version using the above and semver. +4. Create a [new release][new-release]. +5. Follow the same format as [1.1.0][release-110]. 
+ +[issues-tracker]: https://github.com/pelletier/go-toml/issues +[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md +[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml +[readme]: ./README.md +[fork]: https://help.github.com/articles/fork-a-repo +[pull-request]: https://help.github.com/en/articles/creating-a-pull-request +[releases]: https://github.com/pelletier/go-toml/releases +[new-release]: https://github.com/pelletier/go-toml/releases/new +[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile new file mode 100644 index 000000000..fffdb0166 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/Dockerfile @@ -0,0 +1,11 @@ +FROM golang:1.12-alpine3.9 as builder +WORKDIR /go/src/github.com/pelletier/go-toml +COPY . . +ENV CGO_ENABLED=0 +ENV GOOS=linux +RUN go install ./... + +FROM scratch +COPY --from=builder /go/bin/tomll /usr/bin/tomll +COPY --from=builder /go/bin/tomljson /usr/bin/tomljson +COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE new file mode 100644 index 000000000..f414553c2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/LICENSE @@ -0,0 +1,247 @@ +The bulk of github.com/pelletier/go-toml is distributed under the MIT license +(see below), with the exception of localtime.go and localtime.test.go. +Those two files have been copied over from Google's civil library at revision +ed46f5086358513cf8c25f8e3f022cb838a49d66, and are distributed under the Apache +2.0 license (see below). 
+ + +github.com/pelletier/go-toml: + + +The MIT License (MIT) + +Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +localtime.go, localtime_test.go: + +Originals: + https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go + https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil_test.go +Changes: + * Renamed files from civil* to localtime*. + * Package changed from civil to toml. + * 'Local' prefix added to all structs. +License: + https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/LICENSE + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile new file mode 100644 index 000000000..9e4503aea --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/Makefile @@ -0,0 +1,29 @@ +export CGO_ENABLED=0 +go := go +go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) +go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) + +out.tools := tomll tomljson jsontoml +out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) +sources := $(wildcard **/*.go) + + +.PHONY: +tools: $(out.tools) + +$(out.tools): $(sources) + GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ + +.PHONY: +dist: $(out.dist) + +$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % + if [ "$(go.goos)" = "windows" ]; then \ + tar -cJf $@ $^.exe; \ + else \ + tar -cJf $@ $^; \ + fi + +.PHONY: +clean: + rm -rf $(out.tools) $(out.dist) diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..041cdc4a2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,5 @@ +**Issue:** add link to pelletier/go-toml issue here + +Explanation of what this pull request does. + +More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md new file mode 100644 index 000000000..7399e04bf --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/README.md @@ -0,0 +1,176 @@ +# go-toml + +Go library for the [TOML](https://toml.io/) format. 
+ +This library supports TOML version +[v1.0.0-rc.3](https://toml.io/en/v1.0.0-rc.3) + +[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml.svg)](https://pkg.go.dev/github.com/pelletier/go-toml) +[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) +[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) +[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) +[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) + + +## Development status + +**ℹ️ Consider go-toml v2!** + +The next version of go-toml is in [active development][v2-dev], and +[nearing completion][v2-map]. + +Though technically in beta, v2 is already more tested, [fixes bugs][v1-bugs], +and [much faster][v2-bench]. If you only need reading and writing TOML documents +(majority of cases), those features are implemented and the API unlikely to +change. + +The remaining features will be added shortly. While pull-requests are welcome on +v1, no active development is expected on it. When v2.0.0 is released, v1 will be +deprecated. 
+ +👉 [go-toml v2][v2] + +[v2]: https://github.com/pelletier/go-toml/tree/v2 +[v2-map]: https://github.com/pelletier/go-toml/discussions/506 +[v2-dev]: https://github.com/pelletier/go-toml/tree/v2 +[v1-bugs]: https://github.com/pelletier/go-toml/issues?q=is%3Aissue+is%3Aopen+label%3Av2-fixed +[v2-bench]: https://github.com/pelletier/go-toml/tree/v2#benchmarks + +## Features + +Go-toml provides the following features for using data parsed from TOML documents: + +* Load TOML documents from files and string data +* Easily navigate TOML structure using Tree +* Marshaling and unmarshaling to and from data structures +* Line & column position data for all parsed elements +* [Query support similar to JSON-Path](query/) +* Syntax errors contain line and column numbers + +## Import + +```go +import "github.com/pelletier/go-toml" +``` + +## Usage example + +Read a TOML document: + +```go +config, _ := toml.Load(` +[postgres] +user = "pelletier" +password = "mypassword"`) +// retrieve data directly +user := config.Get("postgres.user").(string) + +// or using an intermediate object +postgresConfig := config.Get("postgres").(*toml.Tree) +password := postgresConfig.Get("password").(string) +``` + +Or use Unmarshal: + +```go +type Postgres struct { + User string + Password string +} +type Config struct { + Postgres Postgres +} + +doc := []byte(` +[Postgres] +User = "pelletier" +Password = "mypassword"`) + +config := Config{} +toml.Unmarshal(doc, &config) +fmt.Println("user=", config.Postgres.User) +``` + +Or use a query: + +```go +// use a query to gather elements without walking the tree +q, _ := query.Compile("$..[user,password]") +results := q.Execute(config) +for ii, item := range results.Values() { + fmt.Printf("Query result %d: %v\n", ii, item) +} +``` + +## Documentation + +The documentation and additional examples are available at +[pkg.go.dev](https://pkg.go.dev/github.com/pelletier/go-toml). 
+ +## Tools + +Go-toml provides three handy command line tools: + +* `tomll`: Reads TOML files and lints them. + + ``` + go install github.com/pelletier/go-toml/cmd/tomll + tomll --help + ``` +* `tomljson`: Reads a TOML file and outputs its JSON representation. + + ``` + go install github.com/pelletier/go-toml/cmd/tomljson + tomljson --help + ``` + + * `jsontoml`: Reads a JSON file and outputs a TOML representation. + + ``` + go install github.com/pelletier/go-toml/cmd/jsontoml + jsontoml --help + ``` + +### Docker image + +Those tools are also available as a Docker image from +[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to +use `tomljson`: + +``` +docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml +``` + +Only master (`latest`) and tagged versions are published to dockerhub. You +can build your own image as usual: + +``` +docker build -t go-toml . +``` + +## Contribute + +Feel free to report bugs and patches using GitHub's pull requests system on +[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be +much appreciated! + +### Run tests + +`go test ./...` + +### Fuzzing + +The script `./fuzz.sh` is available to +run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml. + +## Versioning + +Go-toml follows [Semantic Versioning](http://semver.org/). The supported version +of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of +this document. The last two major versions of Go are supported +(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). + +## License + +The MIT License (MIT) + Apache 2.0. Read [LICENSE](LICENSE). 
diff --git a/vendor/github.com/pelletier/go-toml/SECURITY.md b/vendor/github.com/pelletier/go-toml/SECURITY.md new file mode 100644 index 000000000..b2f21cfc9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +Use this section to tell people about which versions of your project are +currently being supported with security updates. + +| Version | Supported | +| ---------- | ------------------ | +| Latest 2.x | :white_check_mark: | +| All 1.x | :x: | +| All 0.x | :x: | + +## Reporting a Vulnerability + +Email a vulnerability report to `security@pelletier.codes`. Make sure to include +as many details as possible to reproduce the vulnerability. This is a +side-project: I will try to get back to you as quickly as possible, time +permitting in my personal life. Providing a working patch helps very much! diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml new file mode 100644 index 000000000..4af198b4d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml @@ -0,0 +1,188 @@ +trigger: +- master + +stages: +- stage: run_checks + displayName: "Check" + dependsOn: [] + jobs: + - job: fmt + displayName: "fmt" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.16" + inputs: + version: "1.16" + - task: Go@0 + displayName: "go fmt ./..." + inputs: + command: 'custom' + customCommand: 'fmt' + arguments: './...' 
+ - job: coverage + displayName: "coverage" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.16" + inputs: + version: "1.16" + - task: Go@0 + displayName: "Generate coverage" + inputs: + command: 'test' + arguments: "-race -coverprofile=coverage.txt -covermode=atomic" + - task: Bash@3 + inputs: + targetType: 'inline' + script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' + env: + CODECOV_TOKEN: $(CODECOV_TOKEN) + - job: benchmark + displayName: "benchmark" + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go 1.16" + inputs: + version: "1.16" + - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" + - task: Bash@3 + inputs: + filePath: './benchmark.sh' + arguments: "master $(Build.Repository.Uri)" + + - job: go_unit_tests + displayName: "unit tests" + strategy: + matrix: + linux 1.16: + goVersion: '1.16' + imageName: 'ubuntu-latest' + mac 1.16: + goVersion: '1.16' + imageName: 'macOS-latest' + windows 1.16: + goVersion: '1.16' + imageName: 'windows-latest' + linux 1.15: + goVersion: '1.15' + imageName: 'ubuntu-latest' + mac 1.15: + goVersion: '1.15' + imageName: 'macOS-latest' + windows 1.15: + goVersion: '1.15' + imageName: 'windows-latest' + pool: + vmImage: $(imageName) + steps: + - task: GoTool@0 + displayName: "Install Go $(goVersion)" + inputs: + version: $(goVersion) + - task: Go@0 + displayName: "go test ./..." + inputs: + command: 'test' + arguments: './...' 
+- stage: build_binaries + displayName: "Build binaries" + dependsOn: run_checks + jobs: + - job: build_binary + displayName: "Build binary" + strategy: + matrix: + linux_amd64: + GOOS: linux + GOARCH: amd64 + darwin_amd64: + GOOS: darwin + GOARCH: amd64 + windows_amd64: + GOOS: windows + GOARCH: amd64 + pool: + vmImage: ubuntu-latest + steps: + - task: GoTool@0 + displayName: "Install Go" + inputs: + version: 1.16 + - task: Bash@3 + inputs: + targetType: inline + script: "make dist" + env: + go.goos: $(GOOS) + go.goarch: $(GOARCH) + - task: CopyFiles@2 + inputs: + sourceFolder: '$(Build.SourcesDirectory)' + contents: '*.tar.xz' + TargetFolder: '$(Build.ArtifactStagingDirectory)' + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: binaries +- stage: build_binaries_manifest + displayName: "Build binaries manifest" + dependsOn: build_binaries + jobs: + - job: build_manifest + displayName: "Build binaries manifest" + steps: + - task: DownloadBuildArtifacts@0 + inputs: + buildType: 'current' + downloadType: 'single' + artifactName: 'binaries' + downloadPath: '$(Build.SourcesDirectory)' + - task: Bash@3 + inputs: + targetType: inline + script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" + - task: PublishBuildArtifacts@1 + inputs: + pathtoPublish: '$(Build.ArtifactStagingDirectory)' + artifactName: manifest + +- stage: build_docker_image + displayName: "Build Docker image" + dependsOn: run_checks + jobs: + - job: build + displayName: "Build" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + command: 'build' + Dockerfile: 'Dockerfile' + buildContext: '.' 
+ addPipelineData: false + +- stage: publish_docker_image + displayName: "Publish Docker image" + dependsOn: build_docker_image + condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) + jobs: + - job: publish + displayName: "Publish" + pool: + vmImage: ubuntu-latest + steps: + - task: Docker@2 + inputs: + containerRegistry: 'DockerHub' + repository: 'pelletier/go-toml' + command: 'buildAndPush' + Dockerfile: 'Dockerfile' + buildContext: '.' + tags: 'latest' diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh new file mode 100644 index 000000000..a69d3040f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/benchmark.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +set -ex + +reference_ref=${1:-master} +reference_git=${2:-.} + +if ! `hash benchstat 2>/dev/null`; then + echo "Installing benchstat" + go get golang.org/x/perf/cmd/benchstat +fi + +tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` +ref_tempdir="${tempdir}/ref" +ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt" +local_benchmark="`pwd`/benchmark-local.txt" + +echo "=== ${reference_ref} (${ref_tempdir})" +git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null +pushd ${ref_tempdir} >/dev/null +git checkout ${reference_ref} >/dev/null 2>/dev/null +go test -bench=. -benchmem | tee ${ref_benchmark} +cd benchmark +go test -bench=. -benchmem | tee -a ${ref_benchmark} +popd >/dev/null + +echo "" +echo "=== local" +go test -bench=. -benchmem | tee ${local_benchmark} +cd benchmark +go test -bench=. 
-benchmem | tee -a ${local_benchmark} + +echo "" +echo "=== diff" +benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go new file mode 100644 index 000000000..a1406a32b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/doc.go @@ -0,0 +1,23 @@ +// Package toml is a TOML parser and manipulation library. +// +// This version supports the specification as described in +// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md +// +// Marshaling +// +// Go-toml can marshal and unmarshal TOML documents from and to data +// structures. +// +// TOML document as a tree +// +// Go-toml can operate on a TOML document as a tree. Use one of the Load* +// functions to parse TOML data and obtain a Tree instance, then one of its +// methods to manipulate the tree. +// +// JSONPath-like queries +// +// The package github.com/pelletier/go-toml/query implements a system +// similar to JSONPath to quickly retrieve elements of a TOML document using a +// single expression. See the package documentation for more information. +// +package toml diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml new file mode 100644 index 000000000..780d9c68f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml @@ -0,0 +1,30 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. 
+ [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml new file mode 100644 index 000000000..f45bf88b8 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example.toml @@ -0,0 +1,30 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it +score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go new file mode 100644 index 000000000..14570c8d3 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.go @@ -0,0 +1,31 @@ +// +build gofuzz + +package toml + +func Fuzz(data []byte) int { + tree, err := LoadBytes(data) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + str, err := tree.ToTomlString() + if err != nil { + if str != "" { + panic(`str must be "" if there is an error`) + } + panic(err) + } + + tree, err = Load(str) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an 
error") + } + return 0 + } + + return 1 +} diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh new file mode 100644 index 000000000..3204b4c44 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.sh @@ -0,0 +1,15 @@ +#! /bin/sh +set -eu + +go get github.com/dvyukov/go-fuzz/go-fuzz +go get github.com/dvyukov/go-fuzz/go-fuzz-build + +if [ ! -e toml-fuzz.zip ]; then + go-fuzz-build github.com/pelletier/go-toml +fi + +rm -fr fuzz +mkdir -p fuzz/corpus +cp *.toml fuzz/corpus + +go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go new file mode 100644 index 000000000..e091500b2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -0,0 +1,112 @@ +// Parsing keys handling both bare and quoted keys. + +package toml + +import ( + "errors" + "fmt" +) + +// Convert the bare key group string to an array. +// The input supports double quotation and single quotation, +// but escape sequences are not supported. Lexers must unescape them beforehand. +func parseKey(key string) ([]string, error) { + runes := []rune(key) + var groups []string + + if len(key) == 0 { + return nil, errors.New("empty key") + } + + idx := 0 + for idx < len(runes) { + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip leading whitespace + } + if idx >= len(runes) { + break + } + r := runes[idx] + if isValidBareChar(r) { + // parse bare key + startIdx := idx + endIdx := -1 + idx++ + for idx < len(runes) { + r = runes[idx] + if isValidBareChar(r) { + idx++ + } else if r == '.' { + endIdx = idx + break + } else if isSpace(r) { + endIdx = idx + for ; idx < len(runes) && isSpace(runes[idx]); idx++ { + // skip trailing whitespace + } + if idx < len(runes) && runes[idx] != '.' 
{ + return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) + } + break + } else { + return nil, fmt.Errorf("invalid bare key character: %c", r) + } + } + if endIdx == -1 { + endIdx = idx + } + groups = append(groups, string(runes[startIdx:endIdx])) + } else if r == '\'' { + // parse single quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed single-quoted key") + } + r = runes[idx] + if r == '\'' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break + } + idx++ + } + } else if r == '"' { + // parse double quoted key + idx++ + startIdx := idx + for { + if idx >= len(runes) { + return nil, fmt.Errorf("unclosed double-quoted key") + } + r = runes[idx] + if r == '"' { + groups = append(groups, string(runes[startIdx:idx])) + idx++ + break + } + idx++ + } + } else if r == '.' { + idx++ + if idx >= len(runes) { + return nil, fmt.Errorf("unexpected end of key") + } + r = runes[idx] + if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { + return nil, fmt.Errorf("expecting key part after dot") + } + } else { + return nil, fmt.Errorf("invalid key character: %c", r) + } + } + if len(groups) == 0 { + return nil, fmt.Errorf("empty key") + } + return groups, nil +} + +func isValidBareChar(r rune) bool { + return isAlphanumeric(r) || r == '-' || isDigit(r) +} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go new file mode 100644 index 000000000..313908e3e --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/lexer.go @@ -0,0 +1,1031 @@ +// TOML lexer. 
+// +// Written using the principles developed by Rob Pike in +// http://www.youtube.com/watch?v=HxaD_trXwRE + +package toml + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +// Define state functions +type tomlLexStateFn func() tomlLexStateFn + +// Define lexer +type tomlLexer struct { + inputIdx int + input []rune // Textual source + currentTokenStart int + currentTokenStop int + tokens []token + brackets []rune + line int + col int + endbufferLine int + endbufferCol int +} + +// Basic read operations on input + +func (l *tomlLexer) read() rune { + r := l.peek() + if r == '\n' { + l.endbufferLine++ + l.endbufferCol = 1 + } else { + l.endbufferCol++ + } + l.inputIdx++ + return r +} + +func (l *tomlLexer) next() rune { + r := l.read() + + if r != eof { + l.currentTokenStop++ + } + return r +} + +func (l *tomlLexer) ignore() { + l.currentTokenStart = l.currentTokenStop + l.line = l.endbufferLine + l.col = l.endbufferCol +} + +func (l *tomlLexer) skip() { + l.next() + l.ignore() +} + +func (l *tomlLexer) fastForward(n int) { + for i := 0; i < n; i++ { + l.next() + } +} + +func (l *tomlLexer) emitWithValue(t tokenType, value string) { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: t, + val: value, + }) + l.ignore() +} + +func (l *tomlLexer) emit(t tokenType) { + l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) +} + +func (l *tomlLexer) peek() rune { + if l.inputIdx >= len(l.input) { + return eof + } + return l.input[l.inputIdx] +} + +func (l *tomlLexer) peekString(size int) string { + maxIdx := len(l.input) + upperIdx := l.inputIdx + size // FIXME: potential overflow + if upperIdx > maxIdx { + upperIdx = maxIdx + } + return string(l.input[l.inputIdx:upperIdx]) +} + +func (l *tomlLexer) follow(next string) bool { + return next == l.peekString(len(next)) +} + +// Error management + +func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { + l.tokens = 
append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: tokenError, + val: fmt.Sprintf(format, args...), + }) + return nil +} + +// State functions + +func (l *tomlLexer) lexVoid() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '}': // after '{' + return l.lexRightCurlyBrace + case '[': + return l.lexTableKey + case '#': + return l.lexComment(l.lexVoid) + case '=': + return l.lexEqual + case '\r': + fallthrough + case '\n': + l.skip() + continue + } + + if isSpace(next) { + l.skip() + } + + if isKeyStartChar(next) { + return l.lexKey + } + + if next == eof { + l.next() + break + } + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexRvalue() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '.': + return l.errorf("cannot start float with a dot") + case '=': + return l.lexEqual + case '[': + return l.lexLeftBracket + case ']': + return l.lexRightBracket + case '{': + return l.lexLeftCurlyBrace + case '}': + return l.lexRightCurlyBrace + case '#': + return l.lexComment(l.lexRvalue) + case '"': + return l.lexString + case '\'': + return l.lexLiteralString + case ',': + return l.lexComma + case '\r': + fallthrough + case '\n': + l.skip() + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { + return l.lexRvalue + } + return l.lexVoid + } + + if l.follow("true") { + return l.lexTrue + } + + if l.follow("false") { + return l.lexFalse + } + + if l.follow("inf") { + return l.lexInf + } + + if l.follow("nan") { + return l.lexNan + } + + if isSpace(next) { + l.skip() + continue + } + + if next == eof { + l.next() + break + } + + if next == '+' || next == '-' { + return l.lexNumber + } + + if isDigit(next) { + return l.lexDateTimeOrNumber + } + + return l.errorf("no value can start with %c", next) + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexDateTimeOrNumber() tomlLexStateFn { + // Could be either a date/time, or a digit. + // The options for date/times are: + // YYYY-... 
=> date or date-time + // HH:... => time + // Anything else should be a number. + + lookAhead := l.peekString(5) + if len(lookAhead) < 3 { + return l.lexNumber() + } + + for idx, r := range lookAhead { + if !isDigit(r) { + if idx == 2 && r == ':' { + return l.lexDateTimeOrTime() + } + if idx == 4 && r == '-' { + return l.lexDateTimeOrTime() + } + return l.lexNumber() + } + } + return l.lexNumber() +} + +func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenLeftCurlyBrace) + l.brackets = append(l.brackets, '{') + return l.lexVoid +} + +func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenRightCurlyBrace) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { + return l.errorf("cannot have '}' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] + return l.lexRvalue +} + +func (l *tomlLexer) lexDateTimeOrTime() tomlLexStateFn { + // Example matches: + // 1979-05-27T07:32:00Z + // 1979-05-27T00:32:00-07:00 + // 1979-05-27T00:32:00.999999-07:00 + // 1979-05-27 07:32:00Z + // 1979-05-27 00:32:00-07:00 + // 1979-05-27 00:32:00.999999-07:00 + // 1979-05-27T07:32:00 + // 1979-05-27T00:32:00.999999 + // 1979-05-27 07:32:00 + // 1979-05-27 00:32:00.999999 + // 1979-05-27 + // 07:32:00 + // 00:32:00.999999 + + // we already know those two are digits + l.next() + l.next() + + // Got 2 digits. At that point it could be either a time or a date(-time). + + r := l.next() + if r == ':' { + return l.lexTime() + } + + return l.lexDateTime() +} + +func (l *tomlLexer) lexDateTime() tomlLexStateFn { + // This state accepts an offset date-time, a local date-time, or a local date. 
+ // + // v--- cursor + // 1979-05-27T07:32:00Z + // 1979-05-27T00:32:00-07:00 + // 1979-05-27T00:32:00.999999-07:00 + // 1979-05-27 07:32:00Z + // 1979-05-27 00:32:00-07:00 + // 1979-05-27 00:32:00.999999-07:00 + // 1979-05-27T07:32:00 + // 1979-05-27T00:32:00.999999 + // 1979-05-27 07:32:00 + // 1979-05-27 00:32:00.999999 + // 1979-05-27 + + // date + + // already checked by lexRvalue + l.next() // digit + l.next() // - + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid month digit in date: %c", r) + } + } + + r := l.next() + if r != '-' { + return l.errorf("expected - to separate month of a date, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid day digit in date: %c", r) + } + } + + l.emit(tokenLocalDate) + + r = l.peek() + + if r == eof { + + return l.lexRvalue + } + + if r != ' ' && r != 'T' { + return l.errorf("incorrect date/time separation character: %c", r) + } + + if r == ' ' { + lookAhead := l.peekString(3)[1:] + if len(lookAhead) < 2 { + return l.lexRvalue + } + for _, r := range lookAhead { + if !isDigit(r) { + return l.lexRvalue + } + } + } + + l.skip() // skip the T or ' ' + + // time + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid hour digit in time: %c", r) + } + } + + r = l.next() + if r != ':' { + return l.errorf("time hour/minute separator should be :, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid minute digit in time: %c", r) + } + } + + r = l.next() + if r != ':' { + return l.errorf("time minute/second separator should be :, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid second digit in time: %c", r) + } + } + + r = l.peek() + if r == '.' 
{ + l.next() + r := l.next() + if !isDigit(r) { + return l.errorf("expected at least one digit in time's fraction, not %c", r) + } + + for { + r := l.peek() + if !isDigit(r) { + break + } + l.next() + } + } + + l.emit(tokenLocalTime) + + return l.lexTimeOffset + +} + +func (l *tomlLexer) lexTimeOffset() tomlLexStateFn { + // potential offset + + // Z + // -07:00 + // +07:00 + // nothing + + r := l.peek() + + if r == 'Z' { + l.next() + l.emit(tokenTimeOffset) + } else if r == '+' || r == '-' { + l.next() + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid hour digit in time offset: %c", r) + } + } + + r = l.next() + if r != ':' { + return l.errorf("time offset hour/minute separator should be :, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid minute digit in time offset: %c", r) + } + } + + l.emit(tokenTimeOffset) + } + + return l.lexRvalue +} + +func (l *tomlLexer) lexTime() tomlLexStateFn { + // v--- cursor + // 07:32:00 + // 00:32:00.999999 + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid minute digit in time: %c", r) + } + } + + r := l.next() + if r != ':' { + return l.errorf("time minute/second separator should be :, not %c", r) + } + + for i := 0; i < 2; i++ { + r := l.next() + if !isDigit(r) { + return l.errorf("invalid second digit in time: %c", r) + } + } + + r = l.peek() + if r == '.' 
{ + l.next() + r := l.next() + if !isDigit(r) { + return l.errorf("expected at least one digit in time's fraction, not %c", r) + } + + for { + r := l.peek() + if !isDigit(r) { + break + } + l.next() + } + } + + l.emit(tokenLocalTime) + return l.lexRvalue + +} + +func (l *tomlLexer) lexTrue() tomlLexStateFn { + l.fastForward(4) + l.emit(tokenTrue) + return l.lexRvalue +} + +func (l *tomlLexer) lexFalse() tomlLexStateFn { + l.fastForward(5) + l.emit(tokenFalse) + return l.lexRvalue +} + +func (l *tomlLexer) lexInf() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenInf) + return l.lexRvalue +} + +func (l *tomlLexer) lexNan() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenNan) + return l.lexRvalue +} + +func (l *tomlLexer) lexEqual() tomlLexStateFn { + l.next() + l.emit(tokenEqual) + return l.lexRvalue +} + +func (l *tomlLexer) lexComma() tomlLexStateFn { + l.next() + l.emit(tokenComma) + if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { + return l.lexVoid + } + return l.lexRvalue +} + +// Parse the key and emits its value without escape sequences. +// bare keys, basic string keys and literal string keys are supported. 
+func (l *tomlLexer) lexKey() tomlLexStateFn { + var sb strings.Builder + + for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { + if r == '"' { + l.next() + str, err := l.lexStringAsString(`"`, false, true) + if err != nil { + return l.errorf(err.Error()) + } + sb.WriteString("\"") + sb.WriteString(str) + sb.WriteString("\"") + l.next() + continue + } else if r == '\'' { + l.next() + str, err := l.lexLiteralStringAsString(`'`, false) + if err != nil { + return l.errorf(err.Error()) + } + sb.WriteString("'") + sb.WriteString(str) + sb.WriteString("'") + l.next() + continue + } else if r == '\n' { + return l.errorf("keys cannot contain new lines") + } else if isSpace(r) { + var str strings.Builder + str.WriteString(" ") + + // skip trailing whitespace + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str.WriteRune(r) + l.next() + } + // break loop if not a dot + if r != '.' { + break + } + str.WriteString(".") + // skip trailing whitespace after dot + l.next() + for r = l.peek(); isSpace(r); r = l.peek() { + str.WriteRune(r) + l.next() + } + sb.WriteString(str.String()) + continue + } else if r == '.' 
{ + // skip + } else if !isValidBareChar(r) { + return l.errorf("keys cannot contain %c character", r) + } + sb.WriteRune(r) + l.next() + } + l.emitWithValue(tokenKey, sb.String()) + return l.lexVoid +} + +func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { + return func() tomlLexStateFn { + for next := l.peek(); next != '\n' && next != eof; next = l.peek() { + if next == '\r' && l.follow("\r\n") { + break + } + l.next() + } + l.ignore() + return previousState + } +} + +func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { + l.next() + l.emit(tokenLeftBracket) + l.brackets = append(l.brackets, '[') + return l.lexRvalue +} + +func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { + var sb strings.Builder + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + // find end of string + for { + if l.follow(terminator) { + return sb.String(), nil + } + + next := l.peek() + if next == eof { + break + } + sb.WriteRune(l.next()) + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexLiteralString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := "'" + discardLeadingNewLine := false + if l.follow("''") { + l.skip() + l.skip() + terminator = "'''" + discardLeadingNewLine = true + } + + str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +// Lex a string and return the results as a string. +// Terminator is the substring indicating the end of the token. +// The resulting string does not include the terminator. 
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { + var sb strings.Builder + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + for { + if l.follow(terminator) { + return sb.String(), nil + } + + if l.follow("\\") { + l.next() + switch l.peek() { + case '\r': + fallthrough + case '\n': + fallthrough + case '\t': + fallthrough + case ' ': + // skip all whitespace chars following backslash + for strings.ContainsRune("\r\n\t ", l.peek()) { + l.next() + } + case '"': + sb.WriteString("\"") + l.next() + case 'n': + sb.WriteString("\n") + l.next() + case 'b': + sb.WriteString("\b") + l.next() + case 'f': + sb.WriteString("\f") + l.next() + case '/': + sb.WriteString("/") + l.next() + case 't': + sb.WriteString("\t") + l.next() + case 'r': + sb.WriteString("\r") + l.next() + case '\\': + sb.WriteString("\\") + l.next() + case 'u': + l.next() + var code strings.Builder + for i := 0; i < 4; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code.WriteRune(c) + } + intcode, err := strconv.ParseInt(code.String(), 16, 32) + if err != nil { + return "", errors.New("invalid unicode escape: \\u" + code.String()) + } + sb.WriteRune(rune(intcode)) + case 'U': + l.next() + var code strings.Builder + for i := 0; i < 8; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code.WriteRune(c) + } + intcode, err := strconv.ParseInt(code.String(), 16, 64) + if err != nil { + return "", errors.New("invalid unicode escape: \\U" + code.String()) + } + sb.WriteRune(rune(intcode)) + default: + return "", errors.New("invalid escape sequence: \\" + string(l.peek())) + } + } else { + r := l.peek() + + if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { + return "", fmt.Errorf("unescaped 
control character %U", r) + } + l.next() + sb.WriteRune(r) + } + + if l.peek() == eof { + break + } + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := `"` + discardLeadingNewLine := false + acceptNewLines := false + if l.follow(`""`) { + l.skip() + l.skip() + terminator = `"""` + discardLeadingNewLine = true + acceptNewLines = true + } + + str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +func (l *tomlLexer) lexTableKey() tomlLexStateFn { + l.next() + + if l.peek() == '[' { + // token '[[' signifies an array of tables + l.next() + l.emit(tokenDoubleLeftBracket) + return l.lexInsideTableArrayKey + } + // vanilla table key + l.emit(tokenLeftBracket) + return l.lexInsideTableKey +} + +// Parse the key till "]]", but only bare keys are supported +func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroupArray) + } + l.next() + if l.peek() != ']' { + break + } + l.next() + l.emit(tokenDoubleRightBracket) + return l.lexVoid + case '[': + return l.errorf("table array key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table array key") +} + +// Parse the key till "]" but only bare keys are supported +func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroup) + } + l.next() + l.emit(tokenRightBracket) + return l.lexVoid + case '[': + return l.errorf("table key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table 
key") +} + +func (l *tomlLexer) lexRightBracket() tomlLexStateFn { + l.next() + l.emit(tokenRightBracket) + if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { + return l.errorf("cannot have ']' here") + } + l.brackets = l.brackets[:len(l.brackets)-1] + return l.lexRvalue +} + +type validRuneFn func(r rune) bool + +func isValidHexRune(r rune) bool { + return r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' || + r >= '0' && r <= '9' || + r == '_' +} + +func isValidOctalRune(r rune) bool { + return r >= '0' && r <= '7' || r == '_' +} + +func isValidBinaryRune(r rune) bool { + return r == '0' || r == '1' || r == '_' +} + +func (l *tomlLexer) lexNumber() tomlLexStateFn { + r := l.peek() + + if r == '0' { + follow := l.peekString(2) + if len(follow) == 2 { + var isValidRune validRuneFn + switch follow[1] { + case 'x': + isValidRune = isValidHexRune + case 'o': + isValidRune = isValidOctalRune + case 'b': + isValidRune = isValidBinaryRune + default: + if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { + return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) + } + } + + if isValidRune != nil { + l.next() + l.next() + digitSeen := false + for { + next := l.peek() + if !isValidRune(next) { + break + } + digitSeen = true + l.next() + } + + if !digitSeen { + return l.errorf("number needs at least one digit") + } + + l.emit(tokenInteger) + + return l.lexRvalue + } + } + } + + if r == '+' || r == '-' { + l.next() + if l.follow("inf") { + return l.lexInf + } + if l.follow("nan") { + return l.lexNan + } + } + + pointSeen := false + expSeen := false + digitSeen := false + for { + next := l.peek() + if next == '.' 
{ + if pointSeen { + return l.errorf("cannot have two dots in one float") + } + l.next() + if !isDigit(l.peek()) { + return l.errorf("float cannot end with a dot") + } + pointSeen = true + } else if next == 'e' || next == 'E' { + expSeen = true + l.next() + r := l.peek() + if r == '+' || r == '-' { + l.next() + } + } else if isDigit(next) { + digitSeen = true + l.next() + } else if next == '_' { + l.next() + } else { + break + } + if pointSeen && !digitSeen { + return l.errorf("cannot start float with a dot") + } + } + + if !digitSeen { + return l.errorf("no digit in that number") + } + if pointSeen || expSeen { + l.emit(tokenFloat) + } else { + l.emit(tokenInteger) + } + return l.lexRvalue +} + +func (l *tomlLexer) run() { + for state := l.lexVoid; state != nil; { + state = state() + } +} + +// Entry point +func lexToml(inputBytes []byte) []token { + runes := bytes.Runes(inputBytes) + l := &tomlLexer{ + input: runes, + tokens: make([]token, 0, 256), + line: 1, + col: 1, + endbufferLine: 1, + endbufferCol: 1, + } + l.run() + return l.tokens +} diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go new file mode 100644 index 000000000..9dfe4b9e6 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/localtime.go @@ -0,0 +1,287 @@ +// Implementation of TOML's local date/time. +// +// Copied over from Google's civil to avoid pulling all the Google dependencies. +// Originals: +// https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go +// Changes: +// * Renamed files from civil* to localtime*. +// * Package changed from civil to toml. +// * 'Local' prefix added to all structs. +// +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package civil implements types for civil time, a time-zone-independent +// representation of time that follows the rules of the proleptic +// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second +// minutes. +// +// Because they lack location information, these types do not represent unique +// moments or intervals of time. Use time.Time for that purpose. +package toml + +import ( + "fmt" + "time" +) + +// A LocalDate represents a date (year, month, day). +// +// This type does not include location information, and therefore does not +// describe a unique 24-hour timespan. +type LocalDate struct { + Year int // Year (e.g., 2014). + Month time.Month // Month of the year (January = 1, ...). + Day int // Day of the month, starting at 1. +} + +// LocalDateOf returns the LocalDate in which a time occurs in that time's location. +func LocalDateOf(t time.Time) LocalDate { + var d LocalDate + d.Year, d.Month, d.Day = t.Date() + return d +} + +// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. +func ParseLocalDate(s string) (LocalDate, error) { + t, err := time.Parse("2006-01-02", s) + if err != nil { + return LocalDate{}, err + } + return LocalDateOf(t), nil +} + +// String returns the date in RFC3339 full-date format. +func (d LocalDate) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// IsValid reports whether the date is valid. 
+func (d LocalDate) IsValid() bool { + return LocalDateOf(d.In(time.UTC)) == d +} + +// In returns the time corresponding to time 00:00:00 of the date in the location. +// +// In is always consistent with time.LocalDate, even when time.LocalDate returns a time +// on a different day. For example, if loc is America/Indiana/Vincennes, then both +// time.LocalDate(1955, time.May, 1, 0, 0, 0, 0, loc) +// and +// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc) +// return 23:00:00 on April 30, 1955. +// +// In panics if loc is nil. +func (d LocalDate) In(loc *time.Location) time.Time { + return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) +} + +// AddDays returns the date that is n days in the future. +// n can also be negative to go into the past. +func (d LocalDate) AddDays(n int) LocalDate { + return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n)) +} + +// DaysSince returns the signed number of days between the date and s, not including the end day. +// This is the inverse operation to AddDays. +func (d LocalDate) DaysSince(s LocalDate) (days int) { + // We convert to Unix time so we do not have to worry about leap seconds: + // Unix time increases by exactly 86400 seconds per day. + deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() + return int(deltaUnix / 86400) +} + +// Before reports whether d1 occurs before d2. +func (d1 LocalDate) Before(d2 LocalDate) bool { + if d1.Year != d2.Year { + return d1.Year < d2.Year + } + if d1.Month != d2.Month { + return d1.Month < d2.Month + } + return d1.Day < d2.Day +} + +// After reports whether d1 occurs after d2. +func (d1 LocalDate) After(d2 LocalDate) bool { + return d2.Before(d1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of d.String(). +func (d LocalDate) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// The date is expected to be a string in a format accepted by ParseLocalDate. +func (d *LocalDate) UnmarshalText(data []byte) error { + var err error + *d, err = ParseLocalDate(string(data)) + return err +} + +// A LocalTime represents a time with nanosecond precision. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +// +// This type exists to represent the TIME type in storage-based APIs like BigQuery. +// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. +type LocalTime struct { + Hour int // The hour of the day in 24-hour format; range [0-23] + Minute int // The minute of the hour; range [0-59] + Second int // The second of the minute; range [0-59] + Nanosecond int // The nanosecond of the second; range [0-999999999] +} + +// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs +// in that time's location. It ignores the date. +func LocalTimeOf(t time.Time) LocalTime { + var tm LocalTime + tm.Hour, tm.Minute, tm.Second = t.Clock() + tm.Nanosecond = t.Nanosecond() + return tm +} + +// ParseLocalTime parses a string and returns the time value it represents. +// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After +// the HH:MM:SS part of the string, an optional fractional part may appear, +// consisting of a decimal point followed by one to nine decimal digits. +// (RFC3339 admits only one digit after the decimal point). +func ParseLocalTime(s string) (LocalTime, error) { + t, err := time.Parse("15:04:05.999999999", s) + if err != nil { + return LocalTime{}, err + } + return LocalTimeOf(t), nil +} + +// String returns the date in the format described in ParseLocalTime. If Nanoseconds +// is zero, no fractional part will be generated. Otherwise, the result will +// end with a fractional part consisting of a decimal point and nine digits. 
+func (t LocalTime) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) + if t.Nanosecond == 0 { + return s + } + return s + fmt.Sprintf(".%09d", t.Nanosecond) +} + +// IsValid reports whether the time is valid. +func (t LocalTime) IsValid() bool { + // Construct a non-zero time. + tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) + return LocalTimeOf(tm) == t +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of t.String(). +func (t LocalTime) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The time is expected to be a string in a format accepted by ParseLocalTime. +func (t *LocalTime) UnmarshalText(data []byte) error { + var err error + *t, err = ParseLocalTime(string(data)) + return err +} + +// A LocalDateTime represents a date and time. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +type LocalDateTime struct { + Date LocalDate + Time LocalTime +} + +// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub. + +// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location. +func LocalDateTimeOf(t time.Time) LocalDateTime { + return LocalDateTime{ + Date: LocalDateOf(t), + Time: LocalTimeOf(t), + } +} + +// ParseLocalDateTime parses a string and returns the LocalDateTime it represents. +// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits +// the time offset but includes an optional fractional time, as described in +// ParseLocalTime. Informally, the accepted format is +// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] +// where the 'T' may be a lower-case 't'. 
+func ParseLocalDateTime(s string) (LocalDateTime, error) { + t, err := time.Parse("2006-01-02T15:04:05.999999999", s) + if err != nil { + t, err = time.Parse("2006-01-02t15:04:05.999999999", s) + if err != nil { + return LocalDateTime{}, err + } + } + return LocalDateTimeOf(t), nil +} + +// String returns the date in the format described in ParseLocalDate. +func (dt LocalDateTime) String() string { + return dt.Date.String() + "T" + dt.Time.String() +} + +// IsValid reports whether the datetime is valid. +func (dt LocalDateTime) IsValid() bool { + return dt.Date.IsValid() && dt.Time.IsValid() +} + +// In returns the time corresponding to the LocalDateTime in the given location. +// +// If the time is missing or ambigous at the location, In returns the same +// result as time.LocalDate. For example, if loc is America/Indiana/Vincennes, then +// both +// time.LocalDate(1955, time.May, 1, 0, 30, 0, 0, loc) +// and +// civil.LocalDateTime{ +// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}}, +// civil.LocalTime{Minute: 30}}.In(loc) +// return 23:30:00 on April 30, 1955. +// +// In panics if loc is nil. +func (dt LocalDateTime) In(loc *time.Location) time.Time { + return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) +} + +// Before reports whether dt1 occurs before dt2. +func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool { + return dt1.In(time.UTC).Before(dt2.In(time.UTC)) +} + +// After reports whether dt1 occurs after dt2. +func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool { + return dt2.Before(dt1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of dt.String(). +func (dt LocalDateTime) MarshalText() ([]byte, error) { + return []byte(dt.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// The datetime is expected to be a string in a format accepted by ParseLocalDateTime +func (dt *LocalDateTime) UnmarshalText(data []byte) error { + var err error + *dt, err = ParseLocalDateTime(string(data)) + return err +} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go new file mode 100644 index 000000000..571273049 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal.go @@ -0,0 +1,1308 @@ +package toml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +const ( + tagFieldName = "toml" + tagFieldComment = "comment" + tagCommented = "commented" + tagMultiline = "multiline" + tagLiteral = "literal" + tagDefault = "default" +) + +type tomlOpts struct { + name string + nameFromTag bool + comment string + commented bool + multiline bool + literal bool + include bool + omitempty bool + defaultValue string +} + +type encOpts struct { + quoteMapKeys bool + arraysOneElementPerLine bool +} + +var encOptsDefaults = encOpts{ + quoteMapKeys: false, +} + +type annotation struct { + tag string + comment string + commented string + multiline string + literal string + defaultValue string +} + +var annotationDefault = annotation{ + tag: tagFieldName, + comment: tagFieldComment, + commented: tagCommented, + multiline: tagMultiline, + literal: tagLiteral, + defaultValue: tagDefault, +} + +type MarshalOrder int + +// Orders the Encoder can write the fields to the output stream. +const ( + // Sort fields alphabetically. + OrderAlphabetical MarshalOrder = iota + 1 + // Preserve the order the fields are encountered. For example, the order of fields in + // a struct. 
+ OrderPreserve +) + +var timeType = reflect.TypeOf(time.Time{}) +var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() +var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() +var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() +var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() +var localDateType = reflect.TypeOf(LocalDate{}) +var localTimeType = reflect.TypeOf(LocalTime{}) +var localDateTimeType = reflect.TypeOf(LocalDateTime{}) +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) + +// Check if the given marshal type maps to a Tree primitive +func isPrimitive(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isPrimitive(mtype.Elem()) + case reflect.Bool: + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Struct: + return isTimeType(mtype) + default: + return false + } +} + +func isTimeType(mtype reflect.Type) bool { + return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType +} + +// Check if the given marshal type maps to a Tree slice or array +func isTreeSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTreeSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTree(mtype.Elem()) + default: + return false + } +} + +// Check if the given marshal type maps to a slice or array of a custom marshaler type +func isCustomMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isCustomMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) + default: 
+ return false + } +} + +// Check if the given marshal type maps to a slice or array of a text marshaler type +func isTextMarshalerSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTextMarshalerSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) + default: + return false + } +} + +// Check if the given marshal type maps to a non-Tree slice or array +func isOtherSequence(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isOtherSequence(mtype.Elem()) + case reflect.Slice, reflect.Array: + return !isTreeSequence(mtype) + default: + return false + } +} + +// Check if the given marshal type maps to a Tree +func isTree(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isTree(mtype.Elem()) + case reflect.Map: + return true + case reflect.Struct: + return !isPrimitive(mtype) + default: + return false + } +} + +func isCustomMarshaler(mtype reflect.Type) bool { + return mtype.Implements(marshalerType) +} + +func callCustomMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(Marshaler).MarshalTOML() +} + +func isTextMarshaler(mtype reflect.Type) bool { + return mtype.Implements(textMarshalerType) && !isTimeType(mtype) +} + +func callTextMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(encoding.TextMarshaler).MarshalText() +} + +func isCustomUnmarshaler(mtype reflect.Type) bool { + return mtype.Implements(unmarshalerType) +} + +func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error { + return mval.Interface().(Unmarshaler).UnmarshalTOML(tval) +} + +func isTextUnmarshaler(mtype reflect.Type) bool { + return mtype.Implements(textUnmarshalerType) +} + +func callTextUnmarshaler(mval reflect.Value, text []byte) error { + return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text) +} + +// Marshaler is the 
interface implemented by types that +// can marshal themselves into valid TOML. +type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +// Unmarshaler is the interface implemented by types that +// can unmarshal a TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +/* +Marshal returns the TOML encoding of v. Behavior is similar to the Go json +encoder, except that there is no concept of a Marshaler interface or MarshalTOML +function for sub-structs, and currently only definite types can be marshaled +(i.e. no `interface{}`). + +The following struct annotations are supported: + + toml:"Field" Overrides the field's name to output. + omitempty When set, empty values and groups are not emitted. + comment:"comment" Emits a # comment on the same line. This supports new lines. + commented:"true" Emits the value as commented. + +Note that pointers are automatically assigned the "omitempty" option, as TOML +explicitly does not handle null values (saying instead the label should be +dropped). + +Tree structural types and corresponding marshal types: + + *Tree (*)struct, (*)map[string]interface{} + []*Tree (*)[](*)struct, (*)[](*)map[string]interface{} + []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) + interface{} (*)primitive + +Tree primitive types and corresponding marshal types: + + uint64 uint, uint8-uint64, pointers to same + int64 int, int8-uint64, pointers to same + float64 float32, float64, pointers to same + string string, pointers to same + bool bool, pointers to same + time.LocalTime time.LocalTime{}, pointers to same + +For additional flexibility, use the Encoder API. +*/ +func Marshal(v interface{}) ([]byte, error) { + return NewEncoder(nil).marshal(v) +} + +// Encoder writes TOML values to an output stream. 
+type Encoder struct { + w io.Writer + encOpts + annotation + line int + col int + order MarshalOrder + promoteAnon bool + compactComments bool + indentation string +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + encOpts: encOptsDefaults, + annotation: annotationDefault, + line: 0, + col: 1, + order: OrderAlphabetical, + indentation: " ", + } +} + +// Encode writes the TOML encoding of v to the stream. +// +// See the documentation for Marshal for details. +func (e *Encoder) Encode(v interface{}) error { + b, err := e.marshal(v) + if err != nil { + return err + } + if _, err := e.w.Write(b); err != nil { + return err + } + return nil +} + +// QuoteMapKeys sets up the encoder to encode +// maps with string type keys with quoted TOML keys. +// +// This relieves the character limitations on map keys. +func (e *Encoder) QuoteMapKeys(v bool) *Encoder { + e.quoteMapKeys = v + return e +} + +// ArraysWithOneElementPerLine sets up the encoder to encode arrays +// with more than one element on multiple lines instead of one. +// +// For example: +// +// A = [1,2,3] +// +// Becomes +// +// A = [ +// 1, +// 2, +// 3, +// ] +func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { + e.arraysOneElementPerLine = v + return e +} + +// Order allows to change in which order fields will be written to the output stream. +func (e *Encoder) Order(ord MarshalOrder) *Encoder { + e.order = ord + return e +} + +// Indentation allows to change indentation when marshalling. 
+func (e *Encoder) Indentation(indent string) *Encoder { + e.indentation = indent + return e +} + +// SetTagName allows changing default tag "toml" +func (e *Encoder) SetTagName(v string) *Encoder { + e.tag = v + return e +} + +// SetTagComment allows changing default tag "comment" +func (e *Encoder) SetTagComment(v string) *Encoder { + e.comment = v + return e +} + +// SetTagCommented allows changing default tag "commented" +func (e *Encoder) SetTagCommented(v string) *Encoder { + e.commented = v + return e +} + +// SetTagMultiline allows changing default tag "multiline" +func (e *Encoder) SetTagMultiline(v string) *Encoder { + e.multiline = v + return e +} + +// PromoteAnonymous allows to change how anonymous struct fields are marshaled. +// Usually, they are marshaled as if the inner exported fields were fields in +// the outer struct. However, if an anonymous struct field is given a name in +// its TOML tag, it is treated like a regular struct field with that name. +// rather than being anonymous. +// +// In case anonymous promotion is enabled, all anonymous structs are promoted +// and treated like regular struct fields. +func (e *Encoder) PromoteAnonymous(promote bool) *Encoder { + e.promoteAnon = promote + return e +} + +// CompactComments removes the new line before each comment in the tree. 
+func (e *Encoder) CompactComments(cc bool) *Encoder { + e.compactComments = cc + return e +} + +func (e *Encoder) marshal(v interface{}) ([]byte, error) { + // Check if indentation is valid + for _, char := range e.indentation { + if !isSpace(char) { + return []byte{}, fmt.Errorf("invalid indentation: must only contains space or tab characters") + } + } + + mtype := reflect.TypeOf(v) + if mtype == nil { + return []byte{}, errors.New("nil cannot be marshaled to TOML") + } + + switch mtype.Kind() { + case reflect.Struct, reflect.Map: + case reflect.Ptr: + if mtype.Elem().Kind() != reflect.Struct { + return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML") + } + if reflect.ValueOf(v).IsNil() { + return []byte{}, errors.New("nil pointer cannot be marshaled to TOML") + } + default: + return []byte{}, errors.New("Only a struct or map can be marshaled to TOML") + } + + sval := reflect.ValueOf(v) + if isCustomMarshaler(mtype) { + return callCustomMarshaler(sval) + } + if isTextMarshaler(mtype) { + return callTextMarshaler(sval) + } + t, err := e.valueToTree(mtype, sval) + if err != nil { + return []byte{}, err + } + + var buf bytes.Buffer + _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, e.compactComments, false) + + return buf.Bytes(), err +} + +// Create next tree with a position based on Encoder.line +func (e *Encoder) nextTree() *Tree { + return newTreeWithPosition(Position{Line: e.line, Col: 1}) +} + +// Convert given marshal struct or map value to toml tree +func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { + if mtype.Kind() == reflect.Ptr { + return e.valueToTree(mtype.Elem(), mval.Elem()) + } + tval := e.nextTree() + switch mtype.Kind() { + case reflect.Struct: + switch mval.Interface().(type) { + case Tree: + reflect.ValueOf(tval).Elem().Set(mval) + default: + for i := 0; i < mtype.NumField(); i++ { + mtypef, mvalf := mtype.Field(i), mval.Field(i) + opts := 
tomlOptions(mtypef, e.annotation) + if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) { + val, err := e.valueToToml(mtypef.Type, mvalf) + if err != nil { + return nil, err + } + if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon { + e.appendTree(tval, tree) + } else { + val = e.wrapTomlValue(val, tval) + tval.SetPathWithOptions([]string{opts.name}, SetOptions{ + Comment: opts.comment, + Commented: opts.commented, + Multiline: opts.multiline, + Literal: opts.literal, + }, val) + } + } + } + } + case reflect.Map: + keys := mval.MapKeys() + if e.order == OrderPreserve && len(keys) > 0 { + // Sorting []reflect.Value is not straight forward. + // + // OrderPreserve will support deterministic results when string is used + // as the key to maps. + typ := keys[0].Type() + kind := keys[0].Kind() + if kind == reflect.String { + ikeys := make([]string, len(keys)) + for i := range keys { + ikeys[i] = keys[i].Interface().(string) + } + sort.Strings(ikeys) + for i := range ikeys { + keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) + } + } + } + for _, key := range keys { + mvalf := mval.MapIndex(key) + if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() { + continue + } + val, err := e.valueToToml(mtype.Elem(), mvalf) + if err != nil { + return nil, err + } + val = e.wrapTomlValue(val, tval) + if e.quoteMapKeys { + keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) + if err != nil { + return nil, err + } + tval.SetPath([]string{keyStr}, val) + } else { + tval.SetPath([]string{key.String()}, val) + } + } + } + return tval, nil +} + +// Convert given marshal slice to slice of Toml trees +func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { + tval := make([]*Tree, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := 
e.valueToTree(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal slice to slice of toml values +func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + tval := make([]interface{}, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal value to toml value +func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + if mtype.Kind() == reflect.Ptr { + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + b, err := callTextMarshaler(mval) + return string(b), err + default: + return e.valueToToml(mtype.Elem(), mval.Elem()) + } + } + if mtype.Kind() == reflect.Interface { + return e.valueToToml(mval.Elem().Type(), mval.Elem()) + } + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTextMarshaler(mtype): + b, err := callTextMarshaler(mval) + return string(b), err + case isTree(mtype): + return e.valueToTree(mtype, mval) + case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): + return e.valueToOtherSlice(mtype, mval) + case isTreeSequence(mtype): + return e.valueToTreeSlice(mtype, mval) + default: + switch mtype.Kind() { + case reflect.Bool: + return mval.Bool(), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) { + return fmt.Sprint(mval), nil + } + return mval.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return mval.Uint(), nil + case reflect.Float32, reflect.Float64: + return mval.Float(), nil + case reflect.String: + return mval.String(), nil 
+ case reflect.Struct: + return mval.Interface(), nil + default: + return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) + } + } +} + +func (e *Encoder) appendTree(t, o *Tree) error { + for key, value := range o.values { + if _, ok := t.values[key]; ok { + continue + } + if tomlValue, ok := value.(*tomlValue); ok { + tomlValue.position.Col = t.position.Col + } + t.values[key] = value + } + return nil +} + +// Create a toml value with the current line number as the position line +func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} { + _, isTree := val.(*Tree) + _, isTreeS := val.([]*Tree) + if isTree || isTreeS { + e.line++ + return val + } + + ret := &tomlValue{ + value: val, + position: Position{ + e.line, + parent.position.Col, + }, + } + e.line++ + return ret +} + +// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v. +// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for +// sub-structs, and only definite types can be unmarshaled. +func (t *Tree) Unmarshal(v interface{}) error { + d := Decoder{tval: t, tagName: tagFieldName} + return d.unmarshal(v) +} + +// Marshal returns the TOML encoding of Tree. +// See Marshal() documentation for types mapping table. +func (t *Tree) Marshal() ([]byte, error) { + var buf bytes.Buffer + _, err := t.WriteTo(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// Unmarshal parses the TOML-encoded data and stores the result in the value +// pointed to by v. Behavior is similar to the Go json encoder, except that there +// is no concept of an Unmarshaler interface or UnmarshalTOML function for +// sub-structs, and currently only definite types can be unmarshaled to (i.e. no +// `interface{}`). +// +// The following struct annotations are supported: +// +// toml:"Field" Overrides the field's name to map to. +// default:"foo" Provides a default value. 
+// +// For default values, only fields of the following types are supported: +// * string +// * bool +// * int +// * int64 +// * float64 +// +// See Marshal() documentation for types mapping table. +func Unmarshal(data []byte, v interface{}) error { + t, err := LoadReader(bytes.NewReader(data)) + if err != nil { + return err + } + return t.Unmarshal(v) +} + +// Decoder reads and decodes TOML values from an input stream. +type Decoder struct { + r io.Reader + tval *Tree + encOpts + tagName string + strict bool + visitor visitorState +} + +// NewDecoder returns a new decoder that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + encOpts: encOptsDefaults, + tagName: tagFieldName, + } +} + +// Decode reads a TOML-encoded value from it's input +// and unmarshals it in the value pointed at by v. +// +// See the documentation for Marshal for details. +func (d *Decoder) Decode(v interface{}) error { + var err error + d.tval, err = LoadReader(d.r) + if err != nil { + return err + } + return d.unmarshal(v) +} + +// SetTagName allows changing default tag "toml" +func (d *Decoder) SetTagName(v string) *Decoder { + d.tagName = v + return d +} + +// Strict allows changing to strict decoding. Any fields that are found in the +// input data and do not have a corresponding struct member cause an error. 
+func (d *Decoder) Strict(strict bool) *Decoder { + d.strict = strict + return d +} + +func (d *Decoder) unmarshal(v interface{}) error { + mtype := reflect.TypeOf(v) + if mtype == nil { + return errors.New("nil cannot be unmarshaled from TOML") + } + if mtype.Kind() != reflect.Ptr { + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + } + + elem := mtype.Elem() + + switch elem.Kind() { + case reflect.Struct, reflect.Map: + case reflect.Interface: + elem = mapStringInterfaceType + default: + return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + } + + if reflect.ValueOf(v).IsNil() { + return errors.New("nil pointer cannot be unmarshaled from TOML") + } + + vv := reflect.ValueOf(v).Elem() + + if d.strict { + d.visitor = newVisitorState(d.tval) + } + + sval, err := d.valueFromTree(elem, d.tval, &vv) + if err != nil { + return err + } + if err := d.visitor.validate(); err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(sval) + return nil +} + +// Convert toml tree to marshal struct or map, using marshal type. When mval1 +// is non-nil, merge fields into the given value instead of allocating a new one. +func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval, mval1) + } + + // Check if pointer to value implements the Unmarshaler interface. 
+ if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { + d.visitor.visitAll() + + if tval == nil { + return mvalPtr.Elem(), nil + } + + if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) + } + return mvalPtr.Elem(), nil + } + + var mval reflect.Value + switch mtype.Kind() { + case reflect.Struct: + if mval1 != nil { + mval = *mval1 + } else { + mval = reflect.New(mtype).Elem() + } + + switch mval.Interface().(type) { + case Tree: + mval.Set(reflect.ValueOf(tval).Elem()) + default: + for i := 0; i < mtype.NumField(); i++ { + mtypef := mtype.Field(i) + an := annotation{tag: d.tagName} + opts := tomlOptions(mtypef, an) + if !opts.include { + continue + } + baseKey := opts.name + keysToTry := []string{ + baseKey, + strings.ToLower(baseKey), + strings.ToTitle(baseKey), + strings.ToLower(string(baseKey[0])) + baseKey[1:], + } + + found := false + if tval != nil { + for _, key := range keysToTry { + exists := tval.HasPath([]string{key}) + if !exists { + continue + } + + d.visitor.push(key) + val := tval.GetPath([]string{key}) + fval := mval.Field(i) + mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) + if err != nil { + return mval, formatError(err, tval.GetPositionPath([]string{key})) + } + mval.Field(i).Set(mvalf) + found = true + d.visitor.pop() + break + } + } + + if !found && opts.defaultValue != "" { + mvalf := mval.Field(i) + var val interface{} + var err error + switch mvalf.Kind() { + case reflect.String: + val = opts.defaultValue + case reflect.Bool: + val, err = strconv.ParseBool(opts.defaultValue) + case reflect.Uint: + val, err = strconv.ParseUint(opts.defaultValue, 10, 0) + case reflect.Uint8: + val, err = strconv.ParseUint(opts.defaultValue, 10, 8) + case reflect.Uint16: + val, err = strconv.ParseUint(opts.defaultValue, 10, 16) + case reflect.Uint32: + val, err = strconv.ParseUint(opts.defaultValue, 10, 32) + case reflect.Uint64: + val, err = 
strconv.ParseUint(opts.defaultValue, 10, 64) + case reflect.Int: + val, err = strconv.ParseInt(opts.defaultValue, 10, 0) + case reflect.Int8: + val, err = strconv.ParseInt(opts.defaultValue, 10, 8) + case reflect.Int16: + val, err = strconv.ParseInt(opts.defaultValue, 10, 16) + case reflect.Int32: + val, err = strconv.ParseInt(opts.defaultValue, 10, 32) + case reflect.Int64: + // Check if the provided number has a non-numeric extension. + var hasExtension bool + if len(opts.defaultValue) > 0 { + lastChar := opts.defaultValue[len(opts.defaultValue)-1] + if lastChar < '0' || lastChar > '9' { + hasExtension = true + } + } + // If the value is a time.Duration with extension, parse as duration. + // If the value is an int64 or a time.Duration without extension, parse as number. + if hasExtension && mvalf.Type().String() == "time.Duration" { + val, err = time.ParseDuration(opts.defaultValue) + } else { + val, err = strconv.ParseInt(opts.defaultValue, 10, 64) + } + case reflect.Float32: + val, err = strconv.ParseFloat(opts.defaultValue, 32) + case reflect.Float64: + val, err = strconv.ParseFloat(opts.defaultValue, 64) + default: + return mvalf, fmt.Errorf("unsupported field type for default option") + } + + if err != nil { + return mvalf, err + } + mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) + } + + // save the old behavior above and try to check structs + if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { + tmpTval := tval + if !mtypef.Anonymous { + tmpTval = nil + } + fval := mval.Field(i) + v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) + if err != nil { + return v, err + } + mval.Field(i).Set(v) + } + } + } + case reflect.Map: + mval = reflect.MakeMap(mtype) + for _, key := range tval.Keys() { + d.visitor.push(key) + // TODO: path splits key + val := tval.GetPath([]string{key}) + mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) + if err != nil { + return mval, formatError(err, tval.GetPositionPath([]string{key})) + } 
+ mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) + d.visitor.pop() + } + } + return mval, nil +} + +// Convert toml value to marshal struct/map slice, using marshal type +func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + + for i := 0; i < len(tval); i++ { + d.visitor.push(strconv.Itoa(i)) + val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + d.visitor.pop() + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { + mval, err := makeSliceOrArray(mtype, len(tval)) + if err != nil { + return mval, err + } + + for i := 0; i < len(tval); i++ { + val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val := reflect.ValueOf(tval) + length := val.Len() + + mval, err := makeSliceOrArray(mtype, length) + if err != nil { + return mval, err + } + + for i := 0; i < length; i++ { + val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Create a new slice or a new array with specified length +func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { + var mval reflect.Value + switch mtype.Kind() { + case reflect.Slice: + mval = reflect.MakeSlice(mtype, tLength, tLength) + case reflect.Array: + mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() + if tLength > mtype.Len() { + return mval, 
fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) + } + } + return mval, nil +} + +// Convert toml value to marshal value, using marshal type. When mval1 is non-nil +// and the given type is a struct value, merge fields into it. +func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval, mval1) + } + + switch t := tval.(type) { + case *Tree: + var mval11 *reflect.Value + if mtype.Kind() == reflect.Struct { + mval11 = mval1 + } + + if isTree(mtype) { + return d.valueFromTree(mtype, t, mval11) + } + + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) + } else { + return d.valueFromToml(mval1.Elem().Type(), t, nil) + } + } + + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) + case []*Tree: + if isTreeSequence(mtype) { + return d.valueFromTreeSlice(mtype, t) + } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) + case []interface{}: + d.visitor.visit() + if isOtherSequence(mtype) { + return d.valueFromOtherSlice(mtype, t) + } + if mtype.Kind() == reflect.Interface { + if mval1 == nil || mval1.IsNil() { + return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) + default: + d.visitor.visit() + mvalPtr := reflect.New(mtype) + + // Check if pointer to value implements the 
Unmarshaler interface. + if isCustomUnmarshaler(mvalPtr.Type()) { + if err := callCustomUnmarshaler(mvalPtr, tval); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) + } + return mvalPtr.Elem(), nil + } + + // Check if pointer to value implements the encoding.TextUnmarshaler. + if isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { + if err := d.unmarshalText(tval, mvalPtr); err != nil { + return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) + } + return mvalPtr.Elem(), nil + } + + switch mtype.Kind() { + case reflect.Bool, reflect.Struct: + val := reflect.ValueOf(tval) + + switch val.Type() { + case localDateType: + localDate := val.Interface().(LocalDate) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil + } + case localDateTimeType: + localDateTime := val.Interface().(LocalDateTime) + switch mtype { + case timeType: + return reflect.ValueOf(time.Date( + localDateTime.Date.Year, + localDateTime.Date.Month, + localDateTime.Date.Day, + localDateTime.Time.Hour, + localDateTime.Time.Minute, + localDateTime.Time.Second, + localDateTime.Time.Nanosecond, + time.Local)), nil + } + } + + // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.String: + val := reflect.ValueOf(tval) + // stupidly, int64 is convertible to string. So special case this. 
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + val := reflect.ValueOf(tval) + if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { + d, err := time.ParseDuration(val.String()) + if err != nil { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. %s", tval, tval, mtype.String(), err) + } + return reflect.ValueOf(d), nil + } + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + if val.Type().Kind() != reflect.Uint64 && val.Convert(reflect.TypeOf(int(1))).Int() < 0 { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Float32, reflect.Float64: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { + return reflect.ValueOf(nil), 
fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Interface: + if mval1 == nil || mval1.IsNil() { + return reflect.ValueOf(tval), nil + } else { + ival := mval1.Elem() + return d.valueFromToml(mval1.Elem().Type(), t, &ival) + } + case reflect.Slice, reflect.Array: + if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { + return d.valueFromOtherSliceI(mtype, t) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + default: + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + } + } +} + +func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { + var melem *reflect.Value + + if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { + elem := mval1.Elem() + melem = &elem + } + + val, err := d.valueFromToml(mtype.Elem(), tval, melem) + if err != nil { + return reflect.ValueOf(nil), err + } + mval := reflect.New(mtype.Elem()) + mval.Elem().Set(val) + return mval, nil +} + +func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { + var buf bytes.Buffer + fmt.Fprint(&buf, tval) + return callTextUnmarshaler(mval, buf.Bytes()) +} + +func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { + tag := vf.Tag.Get(an.tag) + parse := strings.Split(tag, ",") + var comment string + if c := vf.Tag.Get(an.comment); c != "" { + comment = c + } + commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) + multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) + literal, _ := strconv.ParseBool(vf.Tag.Get(an.literal)) + 
defaultValue := vf.Tag.Get(tagDefault) + result := tomlOpts{ + name: vf.Name, + nameFromTag: false, + comment: comment, + commented: commented, + multiline: multiline, + literal: literal, + include: true, + omitempty: false, + defaultValue: defaultValue, + } + if parse[0] != "" { + if parse[0] == "-" && len(parse) == 1 { + result.include = false + } else { + result.name = strings.Trim(parse[0], " ") + result.nameFromTag = true + } + } + if vf.PkgPath != "" { + result.include = false + } + if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { + result.omitempty = true + } + if vf.Type.Kind() == reflect.Ptr { + result.omitempty = true + } + return result +} + +func isZero(val reflect.Value) bool { + switch val.Type().Kind() { + case reflect.Slice, reflect.Array, reflect.Map: + return val.Len() == 0 + default: + return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) + } +} + +func formatError(err error, pos Position) error { + if err.Error()[0] == '(' { // Error already contains position information + return err + } + return fmt.Errorf("%s: %s", pos, err) +} + +// visitorState keeps track of which keys were unmarshaled. 
+type visitorState struct { + tree *Tree + path []string + keys map[string]struct{} + active bool +} + +func newVisitorState(tree *Tree) visitorState { + path, result := []string{}, map[string]struct{}{} + insertKeys(path, result, tree) + return visitorState{ + tree: tree, + path: path[:0], + keys: result, + active: true, + } +} + +func (s *visitorState) push(key string) { + if s.active { + s.path = append(s.path, key) + } +} + +func (s *visitorState) pop() { + if s.active { + s.path = s.path[:len(s.path)-1] + } +} + +func (s *visitorState) visit() { + if s.active { + delete(s.keys, strings.Join(s.path, ".")) + } +} + +func (s *visitorState) visitAll() { + if s.active { + for k := range s.keys { + if strings.HasPrefix(k, strings.Join(s.path, ".")) { + delete(s.keys, k) + } + } + } +} + +func (s *visitorState) validate() error { + if !s.active { + return nil + } + undecoded := make([]string, 0, len(s.keys)) + for key := range s.keys { + undecoded = append(undecoded, key) + } + sort.Strings(undecoded) + if len(undecoded) > 0 { + return fmt.Errorf("undecoded keys: %q", undecoded) + } + return nil +} + +func insertKeys(path []string, m map[string]struct{}, tree *Tree) { + for k, v := range tree.values { + switch node := v.(type) { + case []*Tree: + for i, item := range node { + insertKeys(append(path, k, strconv.Itoa(i)), m, item) + } + case *Tree: + insertKeys(append(path, k), m, node) + case *tomlValue: + m[strings.Join(append(path, k), ".")] = struct{}{} + } + } +} diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml new file mode 100644 index 000000000..792b72ed7 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml @@ -0,0 +1,39 @@ +title = "TOML Marshal Testing" + +[basic_lists] + floats = [12.3,45.6,78.9] + bools = [true,false,true] + dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] + ints = [8001,8001,8002] + uints = [5002,5003] 
+ strings = ["One","Two","Three"] + +[[subdocptrs]] + name = "Second" + +[basic_map] + one = "one" + two = "two" + +[subdoc] + + [subdoc.second] + name = "Second" + + [subdoc.first] + name = "First" + +[basic] + uint = 5001 + bool = true + float = 123.4 + float64 = 123.456782132399 + int = 5000 + string = "Bite me" + date = 1979-05-27T07:32:00Z + +[[subdoclist]] + name = "List.First" + +[[subdoclist]] + name = "List.Second" diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml new file mode 100644 index 000000000..ba5e110bf --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml @@ -0,0 +1,39 @@ +title = "TOML Marshal Testing" + +[basic] + bool = true + date = 1979-05-27T07:32:00Z + float = 123.4 + float64 = 123.456782132399 + int = 5000 + string = "Bite me" + uint = 5001 + +[basic_lists] + bools = [true,false,true] + dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] + floats = [12.3,45.6,78.9] + ints = [8001,8001,8002] + strings = ["One","Two","Three"] + uints = [5002,5003] + +[basic_map] + one = "one" + two = "two" + +[subdoc] + + [subdoc.first] + name = "First" + + [subdoc.second] + name = "Second" + +[[subdoclist]] + name = "List.First" + +[[subdoclist]] + name = "List.Second" + +[[subdocptrs]] + name = "Second" diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go new file mode 100644 index 000000000..b3726d0dd --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -0,0 +1,507 @@ +// TOML Parser. 
+ +package toml + +import ( + "errors" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "time" +) + +type tomlParser struct { + flowIdx int + flow []token + tree *Tree + currentTable []string + seenTableKeys []string +} + +type tomlParserStateFn func() tomlParserStateFn + +// Formats and panics an error message based on a token +func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { + panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) +} + +func (p *tomlParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *tomlParser) peek() *token { + if p.flowIdx >= len(p.flow) { + return nil + } + return &p.flow[p.flowIdx] +} + +func (p *tomlParser) assume(typ tokenType) { + tok := p.getToken() + if tok == nil { + p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) + } + if tok.typ != typ { + p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) + } +} + +func (p *tomlParser) getToken() *token { + tok := p.peek() + if tok == nil { + return nil + } + p.flowIdx++ + return tok +} + +func (p *tomlParser) parseStart() tomlParserStateFn { + tok := p.peek() + + // end of stream, parsing is finished + if tok == nil { + return nil + } + + switch tok.typ { + case tokenDoubleLeftBracket: + return p.parseGroupArray + case tokenLeftBracket: + return p.parseGroup + case tokenKey: + return p.parseAssign + case tokenEOF: + return nil + case tokenError: + p.raiseError(tok, "parsing error: %s", tok.String()) + default: + p.raiseError(tok, "unexpected token %s", tok.typ) + } + return nil +} + +func (p *tomlParser) parseGroupArray() tomlParserStateFn { + startToken := p.getToken() // discard the [[ + key := p.getToken() + if key.typ != tokenKeyGroupArray { + p.raiseError(key, "unexpected token %s, was expecting a table array key", key) + } + + // get or create table array element at the indicated part in the path + keys, err := parseKey(key.val) + if err != nil { + 
p.raiseError(key, "invalid table array key: %s", err) + } + p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries + destTree := p.tree.GetPath(keys) + var array []*Tree + if destTree == nil { + array = make([]*Tree, 0) + } else if target, ok := destTree.([]*Tree); ok && target != nil { + array = destTree.([]*Tree) + } else { + p.raiseError(key, "key %s is already assigned and not of type table array", key) + } + p.currentTable = keys + + // add a new tree to the end of the table array + newTree := newTree() + newTree.position = startToken.Position + array = append(array, newTree) + p.tree.SetPath(p.currentTable, array) + + // remove all keys that were children of this table array + prefix := key.val + "." + found := false + for ii := 0; ii < len(p.seenTableKeys); { + tableKey := p.seenTableKeys[ii] + if strings.HasPrefix(tableKey, prefix) { + p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) + } else { + found = (tableKey == key.val) + ii++ + } + } + + // keep this key name from use by other kinds of assignments + if !found { + p.seenTableKeys = append(p.seenTableKeys, key.val) + } + + // move to next parser state + p.assume(tokenDoubleRightBracket) + return p.parseStart +} + +func (p *tomlParser) parseGroup() tomlParserStateFn { + startToken := p.getToken() // discard the [ + key := p.getToken() + if key.typ != tokenKeyGroup { + p.raiseError(key, "unexpected token %s, was expecting a table key", key) + } + for _, item := range p.seenTableKeys { + if item == key.val { + p.raiseError(key, "duplicated tables") + } + } + + p.seenTableKeys = append(p.seenTableKeys, key.val) + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid table array key: %s", err) + } + if err := p.tree.createSubTree(keys, startToken.Position); err != nil { + p.raiseError(key, "%s", err) + } + destTree := p.tree.GetPath(keys) + if target, ok := destTree.(*Tree); ok && target != nil && target.inline { + 
p.raiseError(key, "could not re-define exist inline table or its sub-table : %s", + strings.Join(keys, ".")) + } + p.assume(tokenRightBracket) + p.currentTable = keys + return p.parseStart +} + +func (p *tomlParser) parseAssign() tomlParserStateFn { + key := p.getToken() + p.assume(tokenEqual) + + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err.Error()) + } + + value := p.parseRvalue() + var tableKey []string + if len(p.currentTable) > 0 { + tableKey = p.currentTable + } else { + tableKey = []string{} + } + + prefixKey := parsedKey[0 : len(parsedKey)-1] + tableKey = append(tableKey, prefixKey...) + + // find the table to assign, looking out for arrays of tables + var targetNode *Tree + switch node := p.tree.GetPath(tableKey).(type) { + case []*Tree: + targetNode = node[len(node)-1] + case *Tree: + targetNode = node + case nil: + // create intermediate + if err := p.tree.createSubTree(tableKey, key.Position); err != nil { + p.raiseError(key, "could not create intermediate group: %s", err) + } + targetNode = p.tree.GetPath(tableKey).(*Tree) + default: + p.raiseError(key, "Unknown table type for path: %s", + strings.Join(tableKey, ".")) + } + + if targetNode.inline { + p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s", + strings.Join(tableKey, ".")) + } + + // assign value to the found table + keyVal := parsedKey[len(parsedKey)-1] + localKey := []string{keyVal} + finalKey := append(tableKey, keyVal) + if targetNode.GetPath(localKey) != nil { + p.raiseError(key, "The following key was defined twice: %s", + strings.Join(finalKey, ".")) + } + var toInsert interface{} + + switch value.(type) { + case *Tree, []*Tree: + toInsert = value + default: + toInsert = &tomlValue{value: value, position: key.Position} + } + targetNode.values[keyVal] = toInsert + return p.parseStart +} + +var errInvalidUnderscore = errors.New("invalid use of _ in number") + +func 
numberContainsInvalidUnderscore(value string) error { + // For large numbers, you may use underscores between digits to enhance + // readability. Each underscore must be surrounded by at least one digit on + // each side. + + hasBefore := false + for idx, r := range value { + if r == '_' { + if !hasBefore || idx+1 >= len(value) { + // can't end with an underscore + return errInvalidUnderscore + } + } + hasBefore = isDigit(r) + } + return nil +} + +var errInvalidUnderscoreHex = errors.New("invalid use of _ in hex number") + +func hexNumberContainsInvalidUnderscore(value string) error { + hasBefore := false + for idx, r := range value { + if r == '_' { + if !hasBefore || idx+1 >= len(value) { + // can't end with an underscore + return errInvalidUnderscoreHex + } + } + hasBefore = isHexDigit(r) + } + return nil +} + +func cleanupNumberToken(value string) string { + cleanedVal := strings.Replace(value, "_", "", -1) + return cleanedVal +} + +func (p *tomlParser) parseRvalue() interface{} { + tok := p.getToken() + if tok == nil || tok.typ == tokenEOF { + p.raiseError(tok, "expecting a value") + } + + switch tok.typ { + case tokenString: + return tok.val + case tokenTrue: + return true + case tokenFalse: + return false + case tokenInf: + if tok.val[0] == '-' { + return math.Inf(-1) + } + return math.Inf(1) + case tokenNan: + return math.NaN() + case tokenInteger: + cleanedVal := cleanupNumberToken(tok.val) + base := 10 + s := cleanedVal + checkInvalidUnderscore := numberContainsInvalidUnderscore + if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { + switch cleanedVal[1] { + case 'x': + checkInvalidUnderscore = hexNumberContainsInvalidUnderscore + base = 16 + case 'o': + base = 8 + case 'b': + base = 2 + default: + panic("invalid base") // the lexer should catch this first + } + s = cleanedVal[2:] + } + + err := checkInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + + var val interface{} + val, err = strconv.ParseInt(s, base, 64) + if err == 
nil { + return val + } + + if s[0] != '-' { + if val, err = strconv.ParseUint(s, base, 64); err == nil { + return val + } + } + p.raiseError(tok, "%s", err) + case tokenFloat: + err := numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + cleanedVal := cleanupNumberToken(tok.val) + val, err := strconv.ParseFloat(cleanedVal, 64) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLocalTime: + val, err := ParseLocalTime(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLocalDate: + // a local date may be followed by: + // * nothing: this is a local date + // * a local time: this is a local date-time + + next := p.peek() + if next == nil || next.typ != tokenLocalTime { + val, err := ParseLocalDate(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + } + + localDate := tok + localTime := p.getToken() + + next = p.peek() + if next == nil || next.typ != tokenTimeOffset { + v := localDate.val + "T" + localTime.val + val, err := ParseLocalDateTime(v) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + } + + offset := p.getToken() + + layout := time.RFC3339Nano + v := localDate.val + "T" + localTime.val + offset.val + val, err := time.ParseInLocation(layout, v, time.UTC) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLeftBracket: + return p.parseArray() + case tokenLeftCurlyBrace: + return p.parseInlineTable() + case tokenEqual: + p.raiseError(tok, "cannot have multiple equals for the same key") + case tokenError: + p.raiseError(tok, "%s", tok) + default: + panic(fmt.Errorf("unhandled token: %v", tok)) + } + + return nil +} + +func tokenIsComma(t *token) bool { + return t != nil && t.typ == tokenComma +} + +func (p *tomlParser) parseInlineTable() *Tree { + tree := newTree() + var previous *token +Loop: + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, 
"unterminated inline table") + } + switch follow.typ { + case tokenRightCurlyBrace: + p.getToken() + break Loop + case tokenKey, tokenInteger, tokenString: + if !tokenIsComma(previous) && previous != nil { + p.raiseError(follow, "comma expected between fields in inline table") + } + key := p.getToken() + p.assume(tokenEqual) + + parsedKey, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid key: %s", err) + } + + value := p.parseRvalue() + tree.SetPath(parsedKey, value) + case tokenComma: + if tokenIsComma(previous) { + p.raiseError(follow, "need field between two commas in inline table") + } + p.getToken() + default: + p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) + } + previous = follow + } + if tokenIsComma(previous) { + p.raiseError(previous, "trailing comma at the end of inline table") + } + tree.inline = true + return tree +} + +func (p *tomlParser) parseArray() interface{} { + var array []interface{} + arrayType := reflect.TypeOf(newTree()) + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ == tokenRightBracket { + p.getToken() + break + } + val := p.parseRvalue() + if reflect.TypeOf(val) != arrayType { + arrayType = nil + } + array = append(array, val) + follow = p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ != tokenRightBracket && follow.typ != tokenComma { + p.raiseError(follow, "missing comma") + } + if follow.typ == tokenComma { + p.getToken() + } + } + + // if the array is a mixed-type array or its length is 0, + // don't convert it to a table array + if len(array) <= 0 { + arrayType = nil + } + // An array of Trees is actually an array of inline + // tables, which is a shorthand for a table array. If the + // array was not converted from []interface{} to []*Tree, + // the two notations would not be equivalent. 
+ if arrayType == reflect.TypeOf(newTree()) { + tomlArray := make([]*Tree, len(array)) + for i, v := range array { + tomlArray[i] = v.(*Tree) + } + return tomlArray + } + return array +} + +func parseToml(flow []token) *Tree { + result := newTree() + result.position = Position{1, 1} + parser := &tomlParser{ + flowIdx: 0, + flow: flow, + tree: result, + currentTable: make([]string, 0), + seenTableKeys: make([]string, 0), + } + parser.run() + return result +} diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go new file mode 100644 index 000000000..c17bff87b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/position.go @@ -0,0 +1,29 @@ +// Position support for go-toml + +package toml + +import ( + "fmt" +) + +// Position of a document element within a TOML document. +// +// Line and Col are both 1-indexed positions for the element's line number and +// column number, respectively. Values of zero or less will cause Invalid(), +// to return true. +type Position struct { + Line int // line within the document + Col int // column within the line +} + +// String representation of the position. +// Displays 1-indexed line and column numbers. +func (p Position) String() string { + return fmt.Sprintf("(%d, %d)", p.Line, p.Col) +} + +// Invalid returns whether or not the position is valid (i.e. 
with negative or +// null values) +func (p Position) Invalid() bool { + return p.Line <= 0 || p.Col <= 0 +} diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go new file mode 100644 index 000000000..b437fdd3b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/token.go @@ -0,0 +1,136 @@ +package toml + +import "fmt" + +// Define tokens +type tokenType int + +const ( + eof = -(iota + 1) +) + +const ( + tokenError tokenType = iota + tokenEOF + tokenComment + tokenKey + tokenString + tokenInteger + tokenTrue + tokenFalse + tokenFloat + tokenInf + tokenNan + tokenEqual + tokenLeftBracket + tokenRightBracket + tokenLeftCurlyBrace + tokenRightCurlyBrace + tokenLeftParen + tokenRightParen + tokenDoubleLeftBracket + tokenDoubleRightBracket + tokenLocalDate + tokenLocalTime + tokenTimeOffset + tokenKeyGroup + tokenKeyGroupArray + tokenComma + tokenColon + tokenDollar + tokenStar + tokenQuestion + tokenDot + tokenDotDot + tokenEOL +) + +var tokenTypeNames = []string{ + "Error", + "EOF", + "Comment", + "Key", + "String", + "Integer", + "True", + "False", + "Float", + "Inf", + "NaN", + "=", + "[", + "]", + "{", + "}", + "(", + ")", + "]]", + "[[", + "LocalDate", + "LocalTime", + "TimeOffset", + "KeyGroup", + "KeyGroupArray", + ",", + ":", + "$", + "*", + "?", + ".", + "..", + "EOL", +} + +type token struct { + Position + typ tokenType + val string +} + +func (tt tokenType) String() string { + idx := int(tt) + if idx < len(tokenTypeNames) { + return tokenTypeNames[idx] + } + return "Unknown" +} + +func (t token) String() string { + switch t.typ { + case tokenEOF: + return "EOF" + case tokenError: + return t.val + } + + return fmt.Sprintf("%q", t.val) +} + +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +func isAlphanumeric(r rune) bool { + return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' +} + +func isKeyChar(r rune) bool { + // Keys start with the first character that isn't whitespace or [ 
and end + // with the last non-whitespace character before the equals sign. Keys + // cannot contain a # character." + return !(r == '\r' || r == '\n' || r == eof || r == '=') +} + +func isKeyStartChar(r rune) bool { + return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') +} + +func isDigit(r rune) bool { + return '0' <= r && r <= '9' +} + +func isHexDigit(r rune) bool { + return isDigit(r) || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go new file mode 100644 index 000000000..5541b941f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -0,0 +1,533 @@ +package toml + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" +) + +type tomlValue struct { + value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list + comment string + commented bool + multiline bool + literal bool + position Position +} + +// Tree is the result of the parsing of a TOML file. +type Tree struct { + values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree + comment string + commented bool + inline bool + position Position +} + +func newTree() *Tree { + return newTreeWithPosition(Position{}) +} + +func newTreeWithPosition(pos Position) *Tree { + return &Tree{ + values: make(map[string]interface{}), + position: pos, + } +} + +// TreeFromMap initializes a new Tree object using the given map. +func TreeFromMap(m map[string]interface{}) (*Tree, error) { + result, err := toTree(m) + if err != nil { + return nil, err + } + return result.(*Tree), nil +} + +// Position returns the position of the tree. +func (t *Tree) Position() Position { + return t.position +} + +// Has returns a boolean indicating if the given key exists. 
+func (t *Tree) Has(key string) bool { + if key == "" { + return false + } + return t.HasPath(strings.Split(key, ".")) +} + +// HasPath returns true if the given path of keys exists, false otherwise. +func (t *Tree) HasPath(keys []string) bool { + return t.GetPath(keys) != nil +} + +// Keys returns the keys of the toplevel tree (does not recurse). +func (t *Tree) Keys() []string { + keys := make([]string, len(t.values)) + i := 0 + for k := range t.values { + keys[i] = k + i++ + } + return keys +} + +// Get the value at key in the Tree. +// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. +// If you need to retrieve non-bare keys, use GetPath. +// Returns nil if the path does not exist in the tree. +// If keys is of length zero, the current tree is returned. +func (t *Tree) Get(key string) interface{} { + if key == "" { + return t + } + return t.GetPath(strings.Split(key, ".")) +} + +// GetPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetPath(keys []string) interface{} { + if len(keys) == 0 { + return t + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return nil + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return nil + } + subtree = node[len(node)-1] + default: + return nil // cannot navigate through other node types + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.value + default: + return node + } +} + +// GetArray returns the value at key in the Tree. +// It returns []string, []int64, etc type if key has homogeneous lists +// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. +// Returns nil if the path does not exist in the tree. 
+// If keys is of length zero, the current tree is returned. +func (t *Tree) GetArray(key string) interface{} { + if key == "" { + return t + } + return t.GetArrayPath(strings.Split(key, ".")) +} + +// GetArrayPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetArrayPath(keys []string) interface{} { + if len(keys) == 0 { + return t + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return nil + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return nil + } + subtree = node[len(node)-1] + default: + return nil // cannot navigate through other node types + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + switch n := node.value.(type) { + case []interface{}: + return getArray(n) + default: + return node.value + } + default: + return node + } +} + +// if homogeneous array, then return slice type object over []interface{} +func getArray(n []interface{}) interface{} { + var s []string + var i64 []int64 + var f64 []float64 + var bl []bool + for _, value := range n { + switch v := value.(type) { + case string: + s = append(s, v) + case int64: + i64 = append(i64, v) + case float64: + f64 = append(f64, v) + case bool: + bl = append(bl, v) + default: + return n + } + } + if len(s) == len(n) { + return s + } else if len(i64) == len(n) { + return i64 + } else if len(f64) == len(n) { + return f64 + } else if len(bl) == len(n) { + return bl + } + return n +} + +// GetPosition returns the position of the given key. +func (t *Tree) GetPosition(key string) Position { + if key == "" { + return t.position + } + return t.GetPositionPath(strings.Split(key, ".")) +} + +// SetPositionPath sets the position of element in the tree indicated by 'keys'. 
+// If keys is of length zero, the current tree position is set. +func (t *Tree) SetPositionPath(keys []string, pos Position) { + if len(keys) == 0 { + t.position = pos + return + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return + } + subtree = node[len(node)-1] + default: + return + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + node.position = pos + return + case *Tree: + node.position = pos + return + case []*Tree: + // go to most recent element + if len(node) == 0 { + return + } + node[len(node)-1].position = pos + return + } +} + +// GetPositionPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetPositionPath(keys []string) Position { + if len(keys) == 0 { + return t.position + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return Position{0, 0} + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + subtree = node[len(node)-1] + default: + return Position{0, 0} + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.position + case *Tree: + return node.position + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + return node[len(node)-1].position + default: + return Position{0, 0} + } +} + +// GetDefault works like Get but with a default value +func (t *Tree) GetDefault(key string, def interface{}) interface{} { + val := t.Get(key) + if 
val == nil { + return def + } + return val +} + +// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. +// The default values within the struct are valid default options. +type SetOptions struct { + Comment string + Commented bool + Multiline bool + Literal bool +} + +// SetWithOptions is the same as Set, but allows you to provide formatting +// instructions to the key, that will be used by Marshal(). +func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { + t.SetPathWithOptions(strings.Split(key, "."), opts, value) +} + +// SetPathWithOptions is the same as SetPath, but allows you to provide +// formatting instructions to the key, that will be reused by Marshal(). +func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { + subtree := t + for i, intermediateKey := range keys[:len(keys)-1] { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) + subtree.values[intermediateKey] = nextTree // add new element here + } + switch node := nextTree.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + // create element if it does not exist + node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) + subtree.values[intermediateKey] = node + } + subtree = node[len(node)-1] + } + } + + var toInsert interface{} + + switch v := value.(type) { + case *Tree: + v.comment = opts.Comment + v.commented = opts.Commented + toInsert = value + case []*Tree: + for i := range v { + v[i].commented = opts.Commented + } + toInsert = value + case *tomlValue: + v.comment = opts.Comment + v.commented = opts.Commented + v.multiline = opts.Multiline + v.literal = opts.Literal + toInsert = v + default: + toInsert = &tomlValue{value: value, + comment: opts.Comment, + 
commented: opts.Commented, + multiline: opts.Multiline, + literal: opts.Literal, + position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} + } + + subtree.values[keys[len(keys)-1]] = toInsert +} + +// Set an element in the tree. +// Key is a dot-separated path (e.g. a.b.c). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) Set(key string, value interface{}) { + t.SetWithComment(key, "", false, value) +} + +// SetWithComment is the same as Set, but allows you to provide comment +// information to the key, that will be reused by Marshal(). +func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { + t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) +} + +// SetPath sets an element in the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) SetPath(keys []string, value interface{}) { + t.SetPathWithComment(keys, "", false, value) +} + +// SetPathWithComment is the same as SetPath, but allows you to provide comment +// information to the key, that will be reused by Marshal(). +func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { + t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) +} + +// Delete removes a key from the tree. +// Key is a dot-separated path (e.g. a.b.c). +func (t *Tree) Delete(key string) error { + keys, err := parseKey(key) + if err != nil { + return err + } + return t.DeletePath(keys) +} + +// DeletePath removes a key from the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). 
+func (t *Tree) DeletePath(keys []string) error { + keyLen := len(keys) + if keyLen == 1 { + delete(t.values, keys[0]) + return nil + } + tree := t.GetPath(keys[:keyLen-1]) + item := keys[keyLen-1] + switch node := tree.(type) { + case *Tree: + delete(node.values, item) + return nil + } + return errors.New("no such key to delete") +} + +// createSubTree takes a tree and a key and create the necessary intermediate +// subtrees to create a subtree at that point. In-place. +// +// e.g. passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] +// and tree[a][b][c] +// +// Returns nil on success, error object on failure +func (t *Tree) createSubTree(keys []string, pos Position) error { + subtree := t + for i, intermediateKey := range keys { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) + tree.position = pos + tree.inline = subtree.inline + subtree.values[intermediateKey] = tree + nextTree = tree + } + + switch node := nextTree.(type) { + case []*Tree: + subtree = node[len(node)-1] + case *Tree: + subtree = node + default: + return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", + strings.Join(keys, "."), intermediateKey, nextTree, nextTree) + } + } + return nil +} + +// LoadBytes creates a Tree from a []byte. 
+func LoadBytes(b []byte) (tree *Tree, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = fmt.Errorf("%s", r) + } + }() + + if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { + b = b[4:] + } else if len(b) >= 3 && hasUTF8BOM3(b) { + b = b[3:] + } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { + b = b[2:] + } + + tree = parseToml(lexToml(b)) + return +} + +func hasUTF16BigEndianBOM2(b []byte) bool { + return b[0] == 0xFE && b[1] == 0xFF +} + +func hasUTF16LittleEndianBOM2(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE +} + +func hasUTF8BOM3(b []byte) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +func hasUTF32BigEndianBOM4(b []byte) bool { + return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF +} + +func hasUTF32LittleEndianBOM4(b []byte) bool { + return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 +} + +// LoadReader creates a Tree from any io.Reader. +func LoadReader(reader io.Reader) (tree *Tree, err error) { + inputBytes, err := ioutil.ReadAll(reader) + if err != nil { + return + } + tree, err = LoadBytes(inputBytes) + return +} + +// Load creates a Tree from a string. +func Load(content string) (tree *Tree, err error) { + return LoadBytes([]byte(content)) +} + +// LoadFile creates a Tree from a file. +func LoadFile(path string) (tree *Tree, err error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + return LoadReader(file) +} diff --git a/vendor/github.com/pelletier/go-toml/tomlpub.go b/vendor/github.com/pelletier/go-toml/tomlpub.go new file mode 100644 index 000000000..4136b4625 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomlpub.go @@ -0,0 +1,71 @@ +package toml + +// PubTOMLValue wrapping tomlValue in order to access all properties from outside. 
+type PubTOMLValue = tomlValue + +func (ptv *PubTOMLValue) Value() interface{} { + return ptv.value +} +func (ptv *PubTOMLValue) Comment() string { + return ptv.comment +} +func (ptv *PubTOMLValue) Commented() bool { + return ptv.commented +} +func (ptv *PubTOMLValue) Multiline() bool { + return ptv.multiline +} +func (ptv *PubTOMLValue) Position() Position { + return ptv.position +} + +func (ptv *PubTOMLValue) SetValue(v interface{}) { + ptv.value = v +} +func (ptv *PubTOMLValue) SetComment(s string) { + ptv.comment = s +} +func (ptv *PubTOMLValue) SetCommented(c bool) { + ptv.commented = c +} +func (ptv *PubTOMLValue) SetMultiline(m bool) { + ptv.multiline = m +} +func (ptv *PubTOMLValue) SetPosition(p Position) { + ptv.position = p +} + +// PubTree wrapping Tree in order to access all properties from outside. +type PubTree = Tree + +func (pt *PubTree) Values() map[string]interface{} { + return pt.values +} + +func (pt *PubTree) Comment() string { + return pt.comment +} + +func (pt *PubTree) Commented() bool { + return pt.commented +} + +func (pt *PubTree) Inline() bool { + return pt.inline +} + +func (pt *PubTree) SetValues(v map[string]interface{}) { + pt.values = v +} + +func (pt *PubTree) SetComment(c string) { + pt.comment = c +} + +func (pt *PubTree) SetCommented(c bool) { + pt.commented = c +} + +func (pt *PubTree) SetInline(i bool) { + pt.inline = i +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go new file mode 100644 index 000000000..80353500a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go @@ -0,0 +1,155 @@ +package toml + +import ( + "fmt" + "reflect" + "time" +) + +var kindToType = [reflect.String + 1]reflect.Type{ + reflect.Bool: reflect.TypeOf(true), + reflect.String: reflect.TypeOf(""), + reflect.Float32: reflect.TypeOf(float64(1)), + reflect.Float64: reflect.TypeOf(float64(1)), + reflect.Int: reflect.TypeOf(int64(1)), + reflect.Int8: 
reflect.TypeOf(int64(1)), + reflect.Int16: reflect.TypeOf(int64(1)), + reflect.Int32: reflect.TypeOf(int64(1)), + reflect.Int64: reflect.TypeOf(int64(1)), + reflect.Uint: reflect.TypeOf(uint64(1)), + reflect.Uint8: reflect.TypeOf(uint64(1)), + reflect.Uint16: reflect.TypeOf(uint64(1)), + reflect.Uint32: reflect.TypeOf(uint64(1)), + reflect.Uint64: reflect.TypeOf(uint64(1)), +} + +// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. +// supported values: +// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 +func typeFor(k reflect.Kind) reflect.Type { + if k > 0 && int(k) < len(kindToType) { + return kindToType[k] + } + return nil +} + +func simpleValueCoercion(object interface{}) (interface{}, error) { + switch original := object.(type) { + case string, bool, int64, uint64, float64, time.Time: + return original, nil + case int: + return int64(original), nil + case int8: + return int64(original), nil + case int16: + return int64(original), nil + case int32: + return int64(original), nil + case uint: + return uint64(original), nil + case uint8: + return uint64(original), nil + case uint16: + return uint64(original), nil + case uint32: + return uint64(original), nil + case float32: + return float64(original), nil + case fmt.Stringer: + return original.String(), nil + case []interface{}: + value := reflect.ValueOf(original) + length := value.Len() + arrayValue := reflect.MakeSlice(value.Type(), 0, length) + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return arrayValue.Interface(), nil + default: + return nil, fmt.Errorf("cannot convert type %T to Tree", object) + } +} + +func sliceToTree(object interface{}) (interface{}, error) { + // arrays are a bit tricky, since they can represent either a + // 
collection of simple values, which is represented by one + // *tomlValue, or an array of tables, which is represented by an + // array of *Tree. + + // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice + value := reflect.ValueOf(object) + insideType := value.Type().Elem() + length := value.Len() + if length > 0 { + insideType = reflect.ValueOf(value.Index(0).Interface()).Type() + } + if insideType.Kind() == reflect.Map { + // this is considered as an array of tables + tablesArray := make([]*Tree, 0, length) + for i := 0; i < length; i++ { + table := value.Index(i) + tree, err := toTree(table.Interface()) + if err != nil { + return nil, err + } + tablesArray = append(tablesArray, tree.(*Tree)) + } + return tablesArray, nil + } + + sliceType := typeFor(insideType.Kind()) + if sliceType == nil { + sliceType = insideType + } + + arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) + + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil +} + +func toTree(object interface{}) (interface{}, error) { + value := reflect.ValueOf(object) + + if value.Kind() == reflect.Map { + values := map[string]interface{}{} + keys := value.MapKeys() + for _, key := range keys { + if key.Kind() != reflect.String { + if _, ok := key.Interface().(string); !ok { + return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) + } + } + + v := value.MapIndex(key) + newValue, err := toTree(v.Interface()) + if err != nil { + return nil, err + } + values[key.String()] = newValue + } + return &Tree{values: values, position: Position{}}, nil + } + + if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { + return sliceToTree(object) 
+ } + + simpleValue, err := simpleValueCoercion(object) + if err != nil { + return nil, err + } + return &tomlValue{value: simpleValue, position: Position{}}, nil +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go new file mode 100644 index 000000000..c9afbdab7 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -0,0 +1,552 @@ +package toml + +import ( + "bytes" + "fmt" + "io" + "math" + "math/big" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type valueComplexity int + +const ( + valueSimple valueComplexity = iota + 1 + valueComplex +) + +type sortNode struct { + key string + complexity valueComplexity +} + +// Encodes a string to a TOML-compliant multi-line string value +// This function is a clone of the existing encodeTomlString function, except that whitespace characters +// are preserved. Quotation marks and backslashes are also not escaped. +func encodeMultilineTomlString(value string, commented string) string { + var b bytes.Buffer + adjacentQuoteCount := 0 + + b.WriteString(commented) + for i, rr := range value { + if rr != '"' { + adjacentQuoteCount = 0 + } else { + adjacentQuoteCount++ + } + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + b.WriteString("\t") + case '\n': + b.WriteString("\n" + commented) + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString("\r") + case '"': + if adjacentQuoteCount >= 3 || i == len(value)-1 { + adjacentQuoteCount = 0 + b.WriteString(`\"`) + } else { + b.WriteString(`"`) + } + case '\\': + b.WriteString(`\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +// Encodes a string to a TOML-compliant string value +func encodeTomlString(value string) string { + var b bytes.Buffer + + for _, rr := range value { + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + 
b.WriteString(`\t`) + case '\n': + b.WriteString(`\n`) + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString(`\r`) + case '"': + b.WriteString(`\"`) + case '\\': + b.WriteString(`\\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +func tomlTreeStringRepresentation(t *Tree, ord MarshalOrder) (string, error) { + var orderedVals []sortNode + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + var values []string + for _, node := range orderedVals { + k := node.key + v := t.values[k] + + repr, err := tomlValueStringRepresentation(v, "", "", ord, false) + if err != nil { + return "", err + } + values = append(values, quoteKeyIfNeeded(k)+" = "+repr) + } + return "{ " + strings.Join(values, ", ") + " }", nil +} + +func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { + // this interface check is added to dereference the change made in the writeTo function. + // That change was made to allow this function to see formatting options. 
+ tv, ok := v.(*tomlValue) + if ok { + v = tv.value + } else { + tv = &tomlValue{} + } + + switch value := v.(type) { + case uint64: + return strconv.FormatUint(value, 10), nil + case int64: + return strconv.FormatInt(value, 10), nil + case float64: + // Default bit length is full 64 + bits := 64 + // Float panics if nan is used + if !math.IsNaN(value) { + // if 32 bit accuracy is enough to exactly show, use 32 + _, acc := big.NewFloat(value).Float32() + if acc == big.Exact { + bits = 32 + } + } + if math.Trunc(value) == value { + return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil + } + return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil + case string: + if tv.multiline { + if tv.literal { + b := strings.Builder{} + b.WriteString("'''\n") + b.Write([]byte(value)) + b.WriteString("\n'''") + return b.String(), nil + } else { + return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil + } + } + return "\"" + encodeTomlString(value) + "\"", nil + case []byte: + b, _ := v.([]byte) + return string(b), nil + case bool: + if value { + return "true", nil + } + return "false", nil + case time.Time: + return value.Format(time.RFC3339), nil + case LocalDate: + return value.String(), nil + case LocalDateTime: + return value.String(), nil + case LocalTime: + return value.String(), nil + case *Tree: + return tomlTreeStringRepresentation(value, ord) + case nil: + return "", nil + } + + rv := reflect.ValueOf(v) + + if rv.Kind() == reflect.Slice { + var values []string + for i := 0; i < rv.Len(); i++ { + item := rv.Index(i).Interface() + itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) + if err != nil { + return "", err + } + values = append(values, itemRepr) + } + if arraysOneElementPerLine && len(values) > 1 { + stringBuffer := bytes.Buffer{} + valueIndent := indent + ` ` // TODO: move that to a shared encoder state + + stringBuffer.WriteString("[\n") + + for _, value 
:= range values { + stringBuffer.WriteString(valueIndent) + stringBuffer.WriteString(commented + value) + stringBuffer.WriteString(`,`) + stringBuffer.WriteString("\n") + } + + stringBuffer.WriteString(indent + commented + "]") + + return stringBuffer.String(), nil + } + return "[" + strings.Join(values, ", ") + "]", nil + } + return "", fmt.Errorf("unsupported value type %T: %v", v, v) +} + +func getTreeArrayLine(trees []*Tree) (line int) { + // Prevent returning 0 for empty trees + line = int(^uint(0) >> 1) + // get lowest line number >= 0 + for _, tv := range trees { + if tv.position.Line < line || line == 0 { + line = tv.position.Line + } + } + return +} + +func sortByLines(t *Tree) (vals []sortNode) { + var ( + line int + lines []int + tv *Tree + tom *tomlValue + node sortNode + ) + vals = make([]sortNode, 0) + m := make(map[int]sortNode) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree: + tv = v.(*Tree) + line = tv.position.Line + node = sortNode{key: k, complexity: valueComplex} + case []*Tree: + line = getTreeArrayLine(v.([]*Tree)) + node = sortNode{key: k, complexity: valueComplex} + default: + tom = v.(*tomlValue) + line = tom.position.Line + node = sortNode{key: k, complexity: valueSimple} + } + lines = append(lines, line) + vals = append(vals, node) + m[line] = node + } + sort.Ints(lines) + + for i, line := range lines { + vals[i] = m[line] + } + + return vals +} + +func sortAlphabetical(t *Tree) (vals []sortNode) { + var ( + node sortNode + simpVals []string + compVals []string + ) + vals = make([]sortNode, 0) + m := make(map[string]sortNode) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree, []*Tree: + node = sortNode{key: k, complexity: valueComplex} + compVals = append(compVals, node.key) + default: + node = sortNode{key: k, complexity: valueSimple} + simpVals = append(simpVals, node.key) + } + vals = append(vals, node) + m[node.key] = node + } + + // Simples first to match previous 
implementation + sort.Strings(simpVals) + i := 0 + for _, key := range simpVals { + vals[i] = m[key] + i++ + } + + sort.Strings(compVals) + for _, key := range compVals { + vals[i] = m[key] + i++ + } + + return vals +} + +func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { + return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false, false) +} + +func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord MarshalOrder, indentString string, compactComments, parentCommented bool) (int64, error) { + var orderedVals []sortNode + + switch ord { + case OrderPreserve: + orderedVals = sortByLines(t) + default: + orderedVals = sortAlphabetical(t) + } + + for _, node := range orderedVals { + switch node.complexity { + case valueComplex: + k := node.key + v := t.values[k] + + combinedKey := quoteKeyIfNeeded(k) + if keyspace != "" { + combinedKey = keyspace + "." 
+ combinedKey + } + + switch node := v.(type) { + // node has to be of those two types given how keys are sorted above + case *Tree: + tv, ok := t.values[k].(*Tree) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + if tv.comment != "" { + comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + var commented string + if parentCommented || t.commented || tv.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || tv.commented) + if err != nil { + return bytesCount, err + } + case []*Tree: + for _, subTree := range node { + var commented string + if parentCommented || t.commented || subTree.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + + bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || subTree.commented) + if err != nil { + return bytesCount, err + } + } + } + default: // Simple + k := node.key + v, ok := t.values[k].(*tomlValue) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + + var commented string + if parentCommented || t.commented || v.commented { + commented = "# 
" + } + repr, err := tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + + if v.comment != "" { + comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + if !compactComments { + writtenBytesCountComment, errc := writeStrings(w, "\n") + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + writtenBytesCountComment, errc := writeStrings(w, indent, start, comment, "\n") + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + quotedKey := quoteKeyIfNeeded(k) + writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + } + } + + return bytesCount, nil +} + +// quote a key if it does not fit the bare key format (A-Za-z0-9_-) +// quoted keys use the same rules as strings +func quoteKeyIfNeeded(k string) string { + // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain + // keys that have already been quoted. + // not an ideal situation, but good enough of a stop gap. + if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { + return k + } + isBare := true + for _, r := range k { + if !isValidBareChar(r) { + isBare = false + break + } + } + if isBare { + return k + } + return quoteKey(k) +} + +func quoteKey(k string) string { + return "\"" + encodeTomlString(k) + "\"" +} + +func writeStrings(w io.Writer, s ...string) (int, error) { + var n int + for i := range s { + b, err := io.WriteString(w, s[i]) + n += b + if err != nil { + return n, err + } + } + return n, nil +} + +// WriteTo encode the Tree as Toml and writes it to the writer w. +// Returns the number of bytes written in case of success, or an error if anything happened. 
+func (t *Tree) WriteTo(w io.Writer) (int64, error) { + return t.writeTo(w, "", "", 0, false) +} + +// ToTomlString generates a human-readable representation of the current tree. +// Output spans multiple lines, and is suitable for ingest by a TOML parser. +// If the conversion cannot be performed, ToString returns a non-nil error. +func (t *Tree) ToTomlString() (string, error) { + b, err := t.Marshal() + if err != nil { + return "", err + } + return string(b), nil +} + +// String generates a human-readable representation of the current tree. +// Alias of ToString. Present to implement the fmt.Stringer interface. +func (t *Tree) String() string { + result, _ := t.ToTomlString() + return result +} + +// ToMap recursively generates a representation of the tree using Go built-in structures. +// The following types are used: +// +// * bool +// * float64 +// * int64 +// * string +// * uint64 +// * time.Time +// * map[string]interface{} (where interface{} is any of this list) +// * []interface{} (where interface{} is any of this list) +func (t *Tree) ToMap() map[string]interface{} { + result := map[string]interface{}{} + + for k, v := range t.values { + switch node := v.(type) { + case []*Tree: + var array []interface{} + for _, item := range node { + array = append(array, item.ToMap()) + } + result[k] = array + case *Tree: + result[k] = node.ToMap() + case *tomlValue: + result[k] = tomlValueToGo(node.value) + } + } + return result +} + +func tomlValueToGo(v interface{}) interface{} { + if tree, ok := v.(*Tree); ok { + return tree.ToMap() + } + + rv := reflect.ValueOf(v) + + if rv.Kind() != reflect.Slice { + return v + } + values := make([]interface{}, rv.Len()) + for i := 0; i < rv.Len(); i++ { + item := rv.Index(i).Interface() + values[i] = tomlValueToGo(item) + } + return values +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_writepub.go b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go new file mode 100644 index 000000000..fa326308c --- 
/dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go @@ -0,0 +1,6 @@ +package toml + +// ValueStringRepresentation transforms an interface{} value into its toml string representation. +func ValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { + return tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0081914bf..188f17733 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -41,6 +41,9 @@ github.com/ProtonMail/go-crypto/openpgp/internal/ecc github.com/ProtonMail/go-crypto/openpgp/internal/encoding github.com/ProtonMail/go-crypto/openpgp/packet github.com/ProtonMail/go-crypto/openpgp/s2k +# github.com/alessio/shellescape v1.4.1 +## explicit; go 1.14 +github.com/alessio/shellescape # github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d ## explicit; go 1.13 github.com/asaskevich/govalidator @@ -258,6 +261,10 @@ github.com/google/gofuzz/bytesource # github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 ## explicit; go 1.14 github.com/google/pprof/profile +# github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 +## explicit; go 1.19 +github.com/google/safetext/common +github.com/google/safetext/yamltemplate # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 ## explicit; go 1.13 github.com/google/shlex @@ -414,6 +421,9 @@ github.com/opencontainers/go-digest ## explicit; go 1.17 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 +# github.com/pelletier/go-toml v1.9.5 +## explicit; go 1.12 +github.com/pelletier/go-toml # github.com/peterbourgon/diskv v2.0.1+incompatible ## explicit github.com/peterbourgon/diskv @@ -1156,6 +1166,46 @@ sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics ## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json +# 
sigs.k8s.io/kind v0.20.0 +## explicit; go 1.16 +sigs.k8s.io/kind/pkg/apis/config/defaults +sigs.k8s.io/kind/pkg/apis/config/v1alpha4 +sigs.k8s.io/kind/pkg/cluster +sigs.k8s.io/kind/pkg/cluster/constants +sigs.k8s.io/kind/pkg/cluster/internal/create +sigs.k8s.io/kind/pkg/cluster/internal/create/actions +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer +sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready +sigs.k8s.io/kind/pkg/cluster/internal/delete +sigs.k8s.io/kind/pkg/cluster/internal/kubeadm +sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig +sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig +sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer +sigs.k8s.io/kind/pkg/cluster/internal/logs +sigs.k8s.io/kind/pkg/cluster/internal/providers +sigs.k8s.io/kind/pkg/cluster/internal/providers/common +sigs.k8s.io/kind/pkg/cluster/internal/providers/docker +sigs.k8s.io/kind/pkg/cluster/internal/providers/podman +sigs.k8s.io/kind/pkg/cluster/nodes +sigs.k8s.io/kind/pkg/cluster/nodeutils +sigs.k8s.io/kind/pkg/cmd +sigs.k8s.io/kind/pkg/cmd/kind/version +sigs.k8s.io/kind/pkg/errors +sigs.k8s.io/kind/pkg/exec +sigs.k8s.io/kind/pkg/fs +sigs.k8s.io/kind/pkg/internal/apis/config +sigs.k8s.io/kind/pkg/internal/apis/config/encoding +sigs.k8s.io/kind/pkg/internal/cli +sigs.k8s.io/kind/pkg/internal/env +sigs.k8s.io/kind/pkg/internal/patch +sigs.k8s.io/kind/pkg/internal/sets +sigs.k8s.io/kind/pkg/internal/version +sigs.k8s.io/kind/pkg/log # sigs.k8s.io/kustomize/api v0.13.2 ## explicit; go 1.19 sigs.k8s.io/kustomize/api/filters/annotations diff --git a/vendor/sigs.k8s.io/kind/LICENSE b/vendor/sigs.k8s.io/kind/LICENSE 
new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/sigs.k8s.io/kind/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go new file mode 100644 index 000000000..d546929d5 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package defaults contains cross-api-version configuration defaults +package defaults + +// Image is the default for the Config.Image field, aka the default node image. +const Image = "kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72" diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/default.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/default.go new file mode 100644 index 000000000..4626fdd41 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/default.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha4 + +import ( + "sigs.k8s.io/kind/pkg/apis/config/defaults" +) + +// SetDefaultsCluster sets uninitialized fields to their default value. 
+func SetDefaultsCluster(obj *Cluster) { + // default to a one node cluster + if len(obj.Nodes) == 0 { + obj.Nodes = []Node{ + { + Image: defaults.Image, + Role: ControlPlaneRole, + }, + } + } + // default the nodes + for i := range obj.Nodes { + a := &obj.Nodes[i] + SetDefaultsNode(a) + } + if obj.Networking.IPFamily == "" { + obj.Networking.IPFamily = IPv4Family + } + // default to listening on 127.0.0.1:randomPort on ipv4 + // and [::1]:randomPort on ipv6 + if obj.Networking.APIServerAddress == "" { + obj.Networking.APIServerAddress = "127.0.0.1" + if obj.Networking.IPFamily == IPv6Family { + obj.Networking.APIServerAddress = "::1" + } + } + // default the pod CIDR + if obj.Networking.PodSubnet == "" { + obj.Networking.PodSubnet = "10.244.0.0/16" + if obj.Networking.IPFamily == IPv6Family { + // node-mask cidr default is /64 so we need a larger subnet, we use /56 following best practices + // xref: https://www.ripe.net/publications/docs/ripe-690#4--size-of-end-user-prefix-assignment---48---56-or-something-else- + obj.Networking.PodSubnet = "fd00:10:244::/56" + } + if obj.Networking.IPFamily == DualStackFamily { + obj.Networking.PodSubnet = "10.244.0.0/16,fd00:10:244::/56" + } + } + // default the service CIDR using a different subnet than kubeadm default + // https://github.com/kubernetes/kubernetes/blob/746404f82a28e55e0b76ffa7e40306fb88eb3317/cmd/kubeadm/app/apis/kubeadm/v1beta2/defaults.go#L32 + // Note: kubeadm is using a /12 subnet, that may allocate a 2^20 bitmap in etcd + // we allocate a /16 subnet that allows 65535 services (current Kubernetes tested limit is O(10k) services) + if obj.Networking.ServiceSubnet == "" { + obj.Networking.ServiceSubnet = "10.96.0.0/16" + if obj.Networking.IPFamily == IPv6Family { + obj.Networking.ServiceSubnet = "fd00:10:96::/112" + } + if obj.Networking.IPFamily == DualStackFamily { + obj.Networking.ServiceSubnet = "10.96.0.0/16,fd00:10:96::/112" + } + } + // default the KubeProxyMode using iptables as it's already the 
default + if obj.Networking.KubeProxyMode == "" { + obj.Networking.KubeProxyMode = IPTablesProxyMode + } +} + +// SetDefaultsNode sets uninitialized fields to their default value. +func SetDefaultsNode(obj *Node) { + if obj.Image == "" { + obj.Image = defaults.Image + } + + if obj.Role == "" { + obj.Role = ControlPlaneRole + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/doc.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/doc.go new file mode 100644 index 000000000..68c743e78 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha4 implements the v1alpha4 apiVersion of kind's cluster +// configuration +// +// +k8s:deepcopy-gen=package +package v1alpha4 diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go new file mode 100644 index 000000000..308a6853b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go @@ -0,0 +1,317 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha4 + +// Cluster contains kind cluster configuration +type Cluster struct { + TypeMeta `yaml:",inline" json:",inline"` + + // The cluster name. + // Optional, this will be overridden by --name / KIND_CLUSTER_NAME + Name string `yaml:"name,omitempty" json:"name,omitempty"` + + // Nodes contains the list of nodes defined in the `kind` Cluster + // If unset this will default to a single control-plane node + // Note that if more than one control plane is specified, an external + // control plane load balancer will be provisioned implicitly + Nodes []Node `yaml:"nodes,omitempty" json:"nodes,omitempty"` + + /* Advanced fields */ + + // Networking contains cluster wide network settings + Networking Networking `yaml:"networking,omitempty" json:"networking,omitempty"` + + // FeatureGates contains a map of Kubernetes feature gates to whether they + // are enabled. The feature gates specified here are passed to all Kubernetes components as flags or in config. + // + // https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/ + FeatureGates map[string]bool `yaml:"featureGates,omitempty" json:"featureGates,omitempty"` + + // RuntimeConfig Keys and values are translated into --runtime-config values for kube-apiserver, separated by commas. + // + // Use this to enable alpha APIs. + RuntimeConfig map[string]string `yaml:"runtimeConfig,omitempty" json:"runtimeConfig,omitempty"` + + // KubeadmConfigPatches are applied to the generated kubeadm config as + // merge patches. 
The `kind` field must match the target object, and + // if `apiVersion` is specified it will only be applied to matching objects. + // + // This should be an inline yaml blob-string + // + // https://tools.ietf.org/html/rfc7386 + // + // The cluster-level patches are applied before the node-level patches. + KubeadmConfigPatches []string `yaml:"kubeadmConfigPatches,omitempty" json:"kubeadmConfigPatches,omitempty"` + + // KubeadmConfigPatchesJSON6902 are applied to the generated kubeadm config + // as JSON 6902 patches. The `kind` field must match the target object, and + // if group or version are specified it will only be objects matching the + // apiVersion: group+"/"+version + // + // Name and Namespace are now ignored, but the fields continue to exist for + // backwards compatibility of parsing the config. The name of the generated + // config was/is always fixed as is the namespace so these fields have + // always been a no-op. + // + // https://tools.ietf.org/html/rfc6902 + // + // The cluster-level patches are applied before the node-level patches. + KubeadmConfigPatchesJSON6902 []PatchJSON6902 `yaml:"kubeadmConfigPatchesJSON6902,omitempty" json:"kubeadmConfigPatchesJSON6902,omitempty"` + + // ContainerdConfigPatches are applied to every node's containerd config + // in the order listed. + // These should be toml stringsto be applied as merge patches + ContainerdConfigPatches []string `yaml:"containerdConfigPatches,omitempty" json:"containerdConfigPatches,omitempty"` + + // ContainerdConfigPatchesJSON6902 are applied to every node's containerd config + // in the order listed. + // These should be YAML or JSON formatting RFC 6902 JSON patches + ContainerdConfigPatchesJSON6902 []string `yaml:"containerdConfigPatchesJSON6902,omitempty" json:"containerdConfigPatchesJSON6902,omitempty"` +} + +// TypeMeta partially copies apimachinery/pkg/apis/meta/v1.TypeMeta +// No need for a direct dependence; the fields are stable. 
+type TypeMeta struct { + Kind string `yaml:"kind,omitempty" json:"kind,omitempty"` + APIVersion string `yaml:"apiVersion,omitempty" json:"apiVersion,omitempty"` +} + +// Node contains settings for a node in the `kind` Cluster. +// A node in kind config represent a container that will be provisioned with all the components +// required for the assigned role in the Kubernetes cluster +type Node struct { + // Role defines the role of the node in the Kubernetes cluster + // created by kind + // + // Defaults to "control-plane" + Role NodeRole `yaml:"role,omitempty" json:"role,omitempty"` + + // Image is the node image to use when creating this node + // If unset a default image will be used, see defaults.Image + Image string `yaml:"image,omitempty" json:"image,omitempty"` + + // Labels are the labels with which the respective node will be labeled + Labels map[string]string `yaml:"labels,omitempty" json:"labels,omitempty"` + + /* Advanced fields */ + + // TODO: cri-like types should be inline instead + // ExtraMounts describes additional mount points for the node container + // These may be used to bind a hostPath + ExtraMounts []Mount `yaml:"extraMounts,omitempty" json:"extraMounts,omitempty"` + + // ExtraPortMappings describes additional port mappings for the node container + // binded to a host Port + ExtraPortMappings []PortMapping `yaml:"extraPortMappings,omitempty" json:"extraPortMappings,omitempty"` + + // KubeadmConfigPatches are applied to the generated kubeadm config as + // merge patches. The `kind` field must match the target object, and + // if `apiVersion` is specified it will only be applied to matching objects. + // + // This should be an inline yaml blob-string + // + // https://tools.ietf.org/html/rfc7386 + // + // The node-level patches will be applied after the cluster-level patches + // have been applied. 
(See Cluster.KubeadmConfigPatches) + KubeadmConfigPatches []string `yaml:"kubeadmConfigPatches,omitempty" json:"kubeadmConfigPatches,omitempty"` + + // KubeadmConfigPatchesJSON6902 are applied to the generated kubeadm config + // as JSON 6902 patches. The `kind` field must match the target object, and + // if group or version are specified it will only be objects matching the + // apiVersion: group+"/"+version + // + // Name and Namespace are now ignored, but the fields continue to exist for + // backwards compatibility of parsing the config. The name of the generated + // config was/is always fixed as is the namespace so these fields have + // always been a no-op. + // + // https://tools.ietf.org/html/rfc6902 + // + // The node-level patches will be applied after the cluster-level patches + // have been applied. (See Cluster.KubeadmConfigPatchesJSON6902) + KubeadmConfigPatchesJSON6902 []PatchJSON6902 `yaml:"kubeadmConfigPatchesJSON6902,omitempty" json:"kubeadmConfigPatchesJSON6902,omitempty"` +} + +// NodeRole defines possible role for nodes in a Kubernetes cluster managed by `kind` +type NodeRole string + +const ( + // ControlPlaneRole identifies a node that hosts a Kubernetes control-plane. + // NOTE: in single node clusters, control-plane nodes act also as a worker + // nodes, in which case the taint will be removed. 
see: + // https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#control-plane-node-isolation + ControlPlaneRole NodeRole = "control-plane" + // WorkerRole identifies a node that hosts a Kubernetes worker + WorkerRole NodeRole = "worker" +) + +// Networking contains cluster wide network settings +type Networking struct { + // IPFamily is the network cluster model, currently it can be ipv4 or ipv6 + IPFamily ClusterIPFamily `yaml:"ipFamily,omitempty" json:"ipFamily,omitempty"` + // APIServerPort is the listen port on the host for the Kubernetes API Server + // Defaults to a random port on the host obtained by kind + // + // NOTE: if you set the special value of `-1` then the node backend + // (docker, podman...) will be left to pick the port instead. + // This is potentially useful for remote hosts, BUT it means when the container + // is restarted it will be randomized. Leave this unset to allow kind to pick it. + APIServerPort int32 `yaml:"apiServerPort,omitempty" json:"apiServerPort,omitempty"` + // APIServerAddress is the listen address on the host for the Kubernetes + // API Server. This should be an IP address. + // + // Defaults to 127.0.0.1 + APIServerAddress string `yaml:"apiServerAddress,omitempty" json:"apiServerAddress,omitempty"` + // PodSubnet is the CIDR used for pod IPs + // kind will select a default if unspecified + PodSubnet string `yaml:"podSubnet,omitempty" json:"podSubnet,omitempty"` + // ServiceSubnet is the CIDR used for services VIPs + // kind will select a default if unspecified for IPv6 + ServiceSubnet string `yaml:"serviceSubnet,omitempty" json:"serviceSubnet,omitempty"` + // If DisableDefaultCNI is true, kind will not install the default CNI setup. + // Instead the user should install their own CNI after creating the cluster. 
+ DisableDefaultCNI bool `yaml:"disableDefaultCNI,omitempty" json:"disableDefaultCNI,omitempty"` + // KubeProxyMode defines if kube-proxy should operate in iptables or ipvs mode + // Defaults to 'iptables' mode + KubeProxyMode ProxyMode `yaml:"kubeProxyMode,omitempty" json:"kubeProxyMode,omitempty"` + // DNSSearch defines the DNS search domain to use for nodes. If not set, this will be inherited from the host. + DNSSearch *[]string `yaml:"dnsSearch,omitempty" json:"dnsSearch,omitempty"` +} + +// ClusterIPFamily defines cluster network IP family +type ClusterIPFamily string + +const ( + // IPv4Family sets ClusterIPFamily to ipv4 + IPv4Family ClusterIPFamily = "ipv4" + // IPv6Family sets ClusterIPFamily to ipv6 + IPv6Family ClusterIPFamily = "ipv6" + // DualStackFamily sets ClusterIPFamily to dual + DualStackFamily ClusterIPFamily = "dual" +) + +// ProxyMode defines a proxy mode for kube-proxy +type ProxyMode string + +const ( + // IPTablesProxyMode sets ProxyMode to iptables + IPTablesProxyMode ProxyMode = "iptables" + // IPVSProxyMode sets ProxyMode to ipvs + IPVSProxyMode ProxyMode = "ipvs" +) + +// PatchJSON6902 represents an inline kustomize json 6902 patch +// https://tools.ietf.org/html/rfc6902 +type PatchJSON6902 struct { + // these fields specify the patch target resource + Group string `yaml:"group" json:"group"` + Version string `yaml:"version" json:"version"` + Kind string `yaml:"kind" json:"kind"` + // Patch should contain the contents of the json patch as a string + Patch string `yaml:"patch" json:"patch"` +} + +/* +These types are from +https://github.com/kubernetes/kubernetes/blob/063e7ff358fdc8b0916e6f39beedc0d025734cb1/pkg/kubelet/apis/cri/runtime/v1alpha2/api.pb.go#L183 +*/ + +// Mount specifies a host volume to mount into a container. 
+// This is a close copy of the upstream cri Mount type +// see: k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2 +// It additionally serializes the "propagation" field with the string enum +// names on disk as opposed to the int32 values, and the serialized field names +// have been made closer to core/v1 VolumeMount field names +// In yaml this looks like: +// +// containerPath: /foo +// hostPath: /bar +// readOnly: true +// selinuxRelabel: false +// propagation: None +// +// Propagation may be one of: None, HostToContainer, Bidirectional +type Mount struct { + // Path of the mount within the container. + ContainerPath string `yaml:"containerPath,omitempty" json:"containerPath,omitempty"` + // Path of the mount on the host. If the hostPath doesn't exist, then runtimes + // should report error. If the hostpath is a symbolic link, runtimes should + // follow the symlink and mount the real destination to container. + HostPath string `yaml:"hostPath,omitempty" json:"hostPath,omitempty"` + // If set, the mount is read-only. + Readonly bool `yaml:"readOnly,omitempty" json:"readOnly,omitempty"` + // If set, the mount needs SELinux relabeling. + SelinuxRelabel bool `yaml:"selinuxRelabel,omitempty" json:"selinuxRelabel,omitempty"` + // Requested propagation mode. + Propagation MountPropagation `yaml:"propagation,omitempty" json:"propagation,omitempty"` +} + +// PortMapping specifies a host port mapped into a container port. +// In yaml this looks like: +// +// containerPort: 80 +// hostPort: 8000 +// listenAddress: 127.0.0.1 +// protocol: TCP +type PortMapping struct { + // Port within the container. + ContainerPort int32 `yaml:"containerPort,omitempty" json:"containerPort,omitempty"` + // Port on the host. + // + // If unset, a random port will be selected. + // + // NOTE: if you set the special value of `-1` then the node backend + // (docker, podman...) will be left to pick the port instead. 
+ // This is potentially useful for remote hosts, BUT it means when the container + // is restarted it will be randomized. Leave this unset to allow kind to pick it. + HostPort int32 `yaml:"hostPort,omitempty" json:"hostPort,omitempty"` + // TODO: add protocol (tcp/udp) and port-ranges + ListenAddress string `yaml:"listenAddress,omitempty" json:"listenAddress,omitempty"` + // Protocol (TCP/UDP/SCTP) + Protocol PortMappingProtocol `yaml:"protocol,omitempty" json:"protocol,omitempty"` +} + +// MountPropagation represents an "enum" for mount propagation options, +// see also Mount. +type MountPropagation string + +const ( + // MountPropagationNone specifies that no mount propagation + // ("private" in Linux terminology). + MountPropagationNone MountPropagation = "None" + // MountPropagationHostToContainer specifies that mounts get propagated + // from the host to the container ("rslave" in Linux). + MountPropagationHostToContainer MountPropagation = "HostToContainer" + // MountPropagationBidirectional specifies that mounts get propagated from + // the host to the container and from the container to the host + // ("rshared" in Linux). + MountPropagationBidirectional MountPropagation = "Bidirectional" +) + +// PortMappingProtocol represents an "enum" for port mapping protocol options, +// see also PortMapping. 
+type PortMappingProtocol string + +const ( + // PortMappingProtocolTCP specifies TCP protocol + PortMappingProtocolTCP PortMappingProtocol = "TCP" + // PortMappingProtocolUDP specifies UDP protocol + PortMappingProtocolUDP PortMappingProtocol = "UDP" + // PortMappingProtocolSCTP specifies SCTP protocol + PortMappingProtocolSCTP PortMappingProtocol = "SCTP" +) diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/yaml.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/yaml.go new file mode 100644 index 000000000..d34d4c8cc --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/yaml.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha4 + +import ( + "strings" + + "sigs.k8s.io/kind/pkg/errors" +) + +/* +Custom YAML (de)serialization for these types +*/ + +// UnmarshalYAML implements custom decoding YAML +// https://godoc.org/gopkg.in/yaml.v3 +func (m *Mount) UnmarshalYAML(unmarshal func(interface{}) error) error { + // first unmarshal in the alias type (to avoid a recursion loop on unmarshal) + type MountAlias Mount + var a MountAlias + if err := unmarshal(&a); err != nil { + return err + } + // now handle propagation + switch a.Propagation { + case "": // unset, will be defaulted + case MountPropagationNone: + case MountPropagationHostToContainer: + case MountPropagationBidirectional: + default: + return errors.Errorf("Unknown MountPropagation: %q", a.Propagation) + } + // and copy over the fields + *m = Mount(a) + return nil +} + +// UnmarshalYAML implements custom decoding YAML +// https://godoc.org/gopkg.in/yaml.v3 +func (p *PortMapping) UnmarshalYAML(unmarshal func(interface{}) error) error { + // first unmarshal in the alias type (to avoid a recursion loop on unmarshal) + type PortMappingAlias PortMapping + var a PortMappingAlias + if err := unmarshal(&a); err != nil { + return err + } + // now handle the protocol field + a.Protocol = PortMappingProtocol(strings.ToUpper(string(a.Protocol))) + switch a.Protocol { + case "": // unset, will be defaulted + case PortMappingProtocolTCP: + case PortMappingProtocolUDP: + case PortMappingProtocolSCTP: + default: + return errors.Errorf("Unknown PortMappingProtocol: %q", a.Protocol) + } + // and copy over the fields + *p = PortMapping(a) + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/zz_generated.deepcopy.go new file mode 100644 index 000000000..b210133da --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/zz_generated.deepcopy.go @@ -0,0 +1,213 @@ +//go:build !ignore_autogenerated +// +build 
!ignore_autogenerated + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha4 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Networking.DeepCopyInto(&out.Networking) + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RuntimeConfig != nil { + in, out := &in.RuntimeConfig, &out.RuntimeConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.KubeadmConfigPatches != nil { + in, out := &in.KubeadmConfigPatches, &out.KubeadmConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatchesJSON6902 != nil { + in, out := &in.KubeadmConfigPatchesJSON6902, &out.KubeadmConfigPatchesJSON6902 + *out = make([]PatchJSON6902, len(*in)) + copy(*out, *in) + } + if in.ContainerdConfigPatches != nil { + in, out := &in.ContainerdConfigPatches, &out.ContainerdConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if 
in.ContainerdConfigPatchesJSON6902 != nil { + in, out := &in.ContainerdConfigPatchesJSON6902, &out.ContainerdConfigPatchesJSON6902 + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mount) DeepCopyInto(out *Mount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount. +func (in *Mount) DeepCopy() *Mount { + if in == nil { + return nil + } + out := new(Mount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in + if in.DNSSearch != nil { + in, out := &in.DNSSearch, &out.DNSSearch + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. +func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Node) DeepCopyInto(out *Node) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = make([]Mount, len(*in)) + copy(*out, *in) + } + if in.ExtraPortMappings != nil { + in, out := &in.ExtraPortMappings, &out.ExtraPortMappings + *out = make([]PortMapping, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatches != nil { + in, out := &in.KubeadmConfigPatches, &out.KubeadmConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatchesJSON6902 != nil { + in, out := &in.KubeadmConfigPatchesJSON6902, &out.KubeadmConfigPatchesJSON6902 + *out = make([]PatchJSON6902, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchJSON6902) DeepCopyInto(out *PatchJSON6902) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchJSON6902. +func (in *PatchJSON6902) DeepCopy() *PatchJSON6902 { + if in == nil { + return nil + } + out := new(PatchJSON6902) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortMapping) DeepCopyInto(out *PortMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMapping. 
+func (in *PortMapping) DeepCopy() *PortMapping { + if in == nil { + return nil + } + out := new(PortMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeMeta) DeepCopyInto(out *TypeMeta) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeMeta. +func (in *TypeMeta) DeepCopy() *TypeMeta { + if in == nil { + return nil + } + out := new(TypeMeta) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/constants/constants.go b/vendor/sigs.k8s.io/kind/pkg/cluster/constants/constants.go new file mode 100644 index 000000000..fb9c0734c --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/constants/constants.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package constants contains well known constants for kind clusters +package constants + +// DefaultClusterName is the default cluster Context name +const DefaultClusterName = "kind" + +/* node role value constants */ +const ( + // ControlPlaneNodeRoleValue identifies a node that hosts a Kubernetes + // control-plane. 
+ // + // NOTE: in single node clusters, control-plane nodes act as worker nodes + ControlPlaneNodeRoleValue string = "control-plane" + + // WorkerNodeRoleValue identifies a node that hosts a Kubernetes worker + WorkerNodeRoleValue string = "worker" + + // ExternalLoadBalancerNodeRoleValue identifies a node that hosts an + // external load balancer for the API server in HA configurations. + // + // Please note that `kind` nodes hosting external load balancer are not + // kubernetes nodes + ExternalLoadBalancerNodeRoleValue string = "external-load-balancer" + + // ExternalEtcdNodeRoleValue identifies a node that hosts an external-etcd + // instance. + // + // WARNING: this node type is not yet implemented! + // + // Please note that `kind` nodes hosting external etcd are not + // kubernetes nodes + ExternalEtcdNodeRoleValue string = "external-etcd" +) diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/createoption.go b/vendor/sigs.k8s.io/kind/pkg/cluster/createoption.go new file mode 100644 index 000000000..699bac510 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/createoption.go @@ -0,0 +1,126 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + "time" + + "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + internalcreate "sigs.k8s.io/kind/pkg/cluster/internal/create" + internalencoding "sigs.k8s.io/kind/pkg/internal/apis/config/encoding" +) + +// CreateOption is a Provider.Create option +type CreateOption interface { + apply(*internalcreate.ClusterOptions) error +} + +type createOptionAdapter func(*internalcreate.ClusterOptions) error + +func (c createOptionAdapter) apply(o *internalcreate.ClusterOptions) error { + return c(o) +} + +// CreateWithConfigFile configures the config file path to use +func CreateWithConfigFile(path string) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + var err error + o.Config, err = internalencoding.Load(path) + return err + }) +} + +// CreateWithRawConfig configures the config to use from raw (yaml) bytes +func CreateWithRawConfig(raw []byte) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + var err error + o.Config, err = internalencoding.Parse(raw) + return err + }) +} + +// CreateWithV1Alpha4Config configures the cluster with a v1alpha4 config +func CreateWithV1Alpha4Config(config *v1alpha4.Cluster) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.Config = internalencoding.V1Alpha4ToInternal(config) + return nil + }) +} + +// CreateWithNodeImage overrides the image on all nodes in config +// as an easy way to change the Kubernetes version +func CreateWithNodeImage(nodeImage string) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.NodeImage = nodeImage + return nil + }) +} + +// CreateWithRetain disables deletion of nodes and any other cleanup +// that would normally occur after a failure to create +// This is mainly used for debugging purposes +func CreateWithRetain(retain bool) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) 
error { + o.Retain = retain + return nil + }) +} + +// CreateWithWaitForReady configures a maximum wait time for the control plane +// node(s) to be ready. By default no waiting is performed +func CreateWithWaitForReady(waitTime time.Duration) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.WaitForReady = waitTime + return nil + }) +} + +// CreateWithKubeconfigPath sets the explicit --kubeconfig path +func CreateWithKubeconfigPath(explicitPath string) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.KubeconfigPath = explicitPath + return nil + }) +} + +// CreateWithStopBeforeSettingUpKubernetes enables skipping setting up +// kubernetes (kubeadm init etc.) after creating node containers +// This generally shouldn't be used and is only lightly supported, but allows +// provisioning node containers for experimentation +func CreateWithStopBeforeSettingUpKubernetes(stopBeforeSettingUpKubernetes bool) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.StopBeforeSettingUpKubernetes = stopBeforeSettingUpKubernetes + return nil + }) +} + +// CreateWithDisplayUsage enables displaying usage if displayUsage is true +func CreateWithDisplayUsage(displayUsage bool) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.DisplayUsage = displayUsage + return nil + }) +} + +// CreateWithDisplaySalutation enables display a salutation at the end of create +// cluster if displaySalutation is true +func CreateWithDisplaySalutation(displaySalutation bool) CreateOption { + return createOptionAdapter(func(o *internalcreate.ClusterOptions) error { + o.DisplaySalutation = displaySalutation + return nil + }) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/doc.go new file mode 100644 index 000000000..af19392fd --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/doc.go 
@@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cluster implements kind kubernetes-in-docker cluster management +package cluster diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/action.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/action.go new file mode 100644 index 000000000..61dd53cbf --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/action.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package actions + +import ( + "sync" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/cluster/internal/providers" +) + +// Action defines a step of bringing up a kind cluster after initial node +// container creation +type Action interface { + Execute(ctx *ActionContext) error +} + +// ActionContext is data supplied to all actions +type ActionContext struct { + Logger log.Logger + Status *cli.Status + Config *config.Cluster + Provider providers.Provider + cache *cachedData +} + +// NewActionContext returns a new ActionContext +func NewActionContext( + logger log.Logger, + status *cli.Status, + provider providers.Provider, + cfg *config.Cluster, +) *ActionContext { + return &ActionContext{ + Logger: logger, + Status: status, + Provider: provider, + Config: cfg, + cache: &cachedData{}, + } +} + +type cachedData struct { + mu sync.RWMutex + nodes []nodes.Node +} + +func (cd *cachedData) getNodes() []nodes.Node { + cd.mu.RLock() + defer cd.mu.RUnlock() + return cd.nodes +} + +func (cd *cachedData) setNodes(n []nodes.Node) { + cd.mu.Lock() + defer cd.mu.Unlock() + cd.nodes = n +} + +// Nodes returns the list of cluster nodes, this is a cached call +func (ac *ActionContext) Nodes() ([]nodes.Node, error) { + cachedNodes := ac.cache.getNodes() + if cachedNodes != nil { + return cachedNodes, nil + } + n, err := ac.Provider.ListNodes(ac.Config.Name) + if err != nil { + return nil, err + } + ac.cache.setNodes(n) + return n, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config/config.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config/config.go new file mode 100644 index 000000000..172619807 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config/config.go @@ -0,0 +1,279 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config implements the kubeadm config action +package config + +import ( + "bytes" + "fmt" + "net" + "strings" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" + "sigs.k8s.io/kind/pkg/cluster/internal/kubeadm" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/patch" +) + +// Action implements action for creating the node config files +type Action struct{} + +// NewAction returns a new action for creating the config files +func NewAction() actions.Action { + return &Action{} +} + +// Execute runs the action +func (a *Action) Execute(ctx *actions.ActionContext) error { + ctx.Status.Start("Writing configuration 📜") + defer ctx.Status.End(false) + + providerInfo, err := ctx.Provider.Info() + if err != nil { + return err + } + + allNodes, err := ctx.Nodes() + if err != nil { + return err + } + + controlPlaneEndpoint, err := ctx.Provider.GetAPIServerInternalEndpoint(ctx.Config.Name) + if err != nil { + return err + } + + // create kubeadm init config + fns := []func() error{} + + provider := fmt.Sprintf("%s", ctx.Provider) + configData := kubeadm.ConfigData{ + NodeProvider: provider, + ClusterName: ctx.Config.Name, + ControlPlaneEndpoint: controlPlaneEndpoint, + APIBindPort: 
common.APIServerInternalPort, + APIServerAddress: ctx.Config.Networking.APIServerAddress, + Token: kubeadm.Token, + PodSubnet: ctx.Config.Networking.PodSubnet, + KubeProxyMode: string(ctx.Config.Networking.KubeProxyMode), + ServiceSubnet: ctx.Config.Networking.ServiceSubnet, + ControlPlane: true, + IPFamily: ctx.Config.Networking.IPFamily, + FeatureGates: ctx.Config.FeatureGates, + RuntimeConfig: ctx.Config.RuntimeConfig, + RootlessProvider: providerInfo.Rootless, + } + + kubeadmConfigPlusPatches := func(node nodes.Node, data kubeadm.ConfigData) func() error { + return func() error { + data.NodeName = node.String() + kubeadmConfig, err := getKubeadmConfig(ctx.Config, data, node, provider) + if err != nil { + // TODO(bentheelder): logging here + return errors.Wrap(err, "failed to generate kubeadm config content") + } + + ctx.Logger.V(2).Infof("Using the following kubeadm config for node %s:\n%s", node.String(), kubeadmConfig) + return writeKubeadmConfig(kubeadmConfig, node) + } + } + + // create the kubeadm join configuration for the kubernetes cluster nodes only + kubeNodes, err := nodeutils.InternalNodes(allNodes) + if err != nil { + return err + } + + for _, node := range kubeNodes { + node := node // capture loop variable + configData := configData // copy config data + fns = append(fns, kubeadmConfigPlusPatches(node, configData)) + } + + // Create the kubeadm config in all nodes concurrently + if err := errors.UntilErrorConcurrent(fns); err != nil { + return err + } + + // if we have containerd config, patch all the nodes concurrently + if len(ctx.Config.ContainerdConfigPatches) > 0 || len(ctx.Config.ContainerdConfigPatchesJSON6902) > 0 { + fns := make([]func() error, len(kubeNodes)) + for i, node := range kubeNodes { + node := node // capture loop variable + fns[i] = func() error { + // read and patch the config + const containerdConfigPath = "/etc/containerd/config.toml" + var buff bytes.Buffer + if err := node.Command("cat", 
containerdConfigPath).SetStdout(&buff).Run(); err != nil { + return errors.Wrap(err, "failed to read containerd config from node") + } + patched, err := patch.TOML(buff.String(), ctx.Config.ContainerdConfigPatches, ctx.Config.ContainerdConfigPatchesJSON6902) + if err != nil { + return errors.Wrap(err, "failed to patch containerd config") + } + if err := nodeutils.WriteFile(node, containerdConfigPath, patched); err != nil { + return errors.Wrap(err, "failed to write patched containerd config") + } + // restart containerd now that we've re-configured it + // skip if containerd is not running + if err := node.Command("bash", "-c", `! pgrep --exact containerd || systemctl restart containerd`).Run(); err != nil { + return errors.Wrap(err, "failed to restart containerd after patching config") + } + return nil + } + } + if err := errors.UntilErrorConcurrent(fns); err != nil { + return err + } + } + + // mark success + ctx.Status.End(true) + return nil +} + +// getKubeadmConfig generates the kubeadm config contents for the cluster +// by running data through the template and applying patches as needed. +func getKubeadmConfig(cfg *config.Cluster, data kubeadm.ConfigData, node nodes.Node, provider string) (path string, err error) { + kubeVersion, err := nodeutils.KubeVersion(node) + if err != nil { + // TODO(bentheelder): logging here + return "", errors.Wrap(err, "failed to get kubernetes version from node") + } + data.KubernetesVersion = kubeVersion + + // TODO: gross hack! + // identify node in config by matching name (since these are named in order) + // we should really just streamline the bootstrap code and maintain + // this mapping ... 
something for the next major refactor + var configNode *config.Node + namer := common.MakeNodeNamer("") + for i := range cfg.Nodes { + n := &cfg.Nodes[i] + nodeSuffix := namer(string(n.Role)) + if strings.HasSuffix(node.String(), nodeSuffix) { + configNode = n + } + } + if configNode == nil { + return "", errors.Errorf("failed to match node %q to config", node.String()) + } + + // get the node ip address + nodeAddress, nodeAddressIPv6, err := node.IP() + if err != nil { + return "", errors.Wrap(err, "failed to get IP for node") + } + + data.NodeAddress = nodeAddress + // configure the right protocol addresses + if cfg.Networking.IPFamily == config.IPv6Family || cfg.Networking.IPFamily == config.DualStackFamily { + if ip := net.ParseIP(nodeAddressIPv6); ip.To16() == nil { + return "", errors.Errorf("failed to get IPv6 address for node %s; is %s configured to use IPv6 correctly?", node.String(), provider) + } + data.NodeAddress = nodeAddressIPv6 + if cfg.Networking.IPFamily == config.DualStackFamily { + // order matters since the nodeAddress will be used later to configure the apiserver advertise address + // Ref: #2484 + primaryServiceSubnet := strings.Split(cfg.Networking.ServiceSubnet, ",")[0] + ip, _, err := net.ParseCIDR(primaryServiceSubnet) + if err != nil { + return "", fmt.Errorf("failed to parse primary Service Subnet %s (%s): %w", primaryServiceSubnet, cfg.Networking.ServiceSubnet, err) + } + if ip.To4() != nil { + data.NodeAddress = fmt.Sprintf("%s,%s", nodeAddress, nodeAddressIPv6) + } else { + data.NodeAddress = fmt.Sprintf("%s,%s", nodeAddressIPv6, nodeAddress) + } + } + } + + // configure the node labels + if len(configNode.Labels) > 0 { + data.NodeLabels = hashMapLabelsToCommaSeparatedLabels(configNode.Labels) + } + + // set the node role + data.ControlPlane = string(configNode.Role) == constants.ControlPlaneNodeRoleValue + + // generate the config contents + cf, err := kubeadm.Config(data) + if err != nil { + return "", err + } + + clusterPatches, 
clusterJSONPatches := allPatchesFromConfig(cfg) + // apply cluster-level patches first + patchedConfig, err := patch.KubeYAML(cf, clusterPatches, clusterJSONPatches) + if err != nil { + return "", err + } + + // if needed, apply current node's patches + if len(configNode.KubeadmConfigPatches) > 0 || len(configNode.KubeadmConfigPatchesJSON6902) > 0 { + patchedConfig, err = patch.KubeYAML(patchedConfig, configNode.KubeadmConfigPatches, configNode.KubeadmConfigPatchesJSON6902) + if err != nil { + return "", err + } + } + + // fix all the patches to have name metadata matching the generated config + return removeMetadata(patchedConfig), nil +} + +// trims out the metadata.name we put in the config for kustomize matching, +// kubeadm will complain about this otherwise +func removeMetadata(kustomized string) string { + return strings.Replace( + kustomized, + `metadata: + name: config +`, + "", + -1, + ) +} + +func allPatchesFromConfig(cfg *config.Cluster) (patches []string, jsonPatches []config.PatchJSON6902) { + return cfg.KubeadmConfigPatches, cfg.KubeadmConfigPatchesJSON6902 +} + +// writeKubeadmConfig writes the kubeadm configuration in the specified node +func writeKubeadmConfig(kubeadmConfig string, node nodes.Node) error { + // copy the config to the node + if err := nodeutils.WriteFile(node, "/kind/kubeadm.conf", kubeadmConfig); err != nil { + // TODO(bentheelder): logging here + return errors.Wrap(err, "failed to copy kubeadm config to node") + } + + return nil +} + +// hashMapLabelsToCommaSeparatedLabels converts labels in hashmap form to labels in a comma-separated string form like "key1=value1,key2=value2" +func hashMapLabelsToCommaSeparatedLabels(labels map[string]string) string { + output := "" + for key, value := range labels { + output += fmt.Sprintf("%s=%s,", key, value) + } + return strings.TrimSuffix(output, ",") // remove the last character (comma) in the output string +} diff --git 
a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni/cni.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni/cni.go new file mode 100644 index 000000000..fc9675173 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni/cni.go @@ -0,0 +1,134 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package installcni implements the install CNI action +package installcni + +import ( + "bytes" + "strings" + + "github.com/google/safetext/yamltemplate" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/internal/apis/config" + + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/internal/patch" +) + +type action struct{} + +// NewAction returns a new action for installing default CNI +func NewAction() actions.Action { + return &action{} +} + +// Execute runs the action +func (a *action) Execute(ctx *actions.ActionContext) error { + ctx.Status.Start("Installing CNI 🔌") + defer ctx.Status.End(false) + + allNodes, err := ctx.Nodes() + if err != nil { + return err + } + + // get the target node for this task + controlPlanes, err := nodeutils.ControlPlaneNodes(allNodes) + if err != nil { + return err + } + node := controlPlanes[0] // kind expects at least one always + + // read the manifest from the node + var raw bytes.Buffer + if err := node.Command("cat", 
"/kind/manifests/default-cni.yaml").SetStdout(&raw).Run(); err != nil { + return errors.Wrap(err, "failed to read CNI manifest") + } + manifest := raw.String() + + // TODO: remove this check? + // backwards compatibility for mounting your own manifest file to the default + // location + // NOTE: this is intentionally undocumented, as an internal implementation + // detail. Going forward users should disable the default CNI and install + // their own, or use the default. The internal templating mechanism is + // not intended for external usage and is unstable. + if strings.Contains(manifest, "would you kindly template this file") { + t, err := yamltemplate.New("cni-manifest").Parse(manifest) + if err != nil { + return errors.Wrap(err, "failed to parse CNI manifest template") + } + var out bytes.Buffer + err = t.Execute(&out, &struct { + PodSubnet string + }{ + PodSubnet: ctx.Config.Networking.PodSubnet, + }) + if err != nil { + return errors.Wrap(err, "failed to execute CNI manifest template") + } + manifest = out.String() + } + + // NOTE: this is intentionally undocumented, as an internal implementation + // detail. Going forward users should disable the default CNI and install + // their own, or use the default. The internal templating mechanism is + // not intended for external usage and is unstable. 
+ if strings.Contains(manifest, "would you kindly patch this file") { + // Add the controlplane endpoint so kindnet doesn´t have to wait for kube-proxy + controlPlaneEndpoint, err := ctx.Provider.GetAPIServerInternalEndpoint(ctx.Config.Name) + if err != nil { + return err + } + + patchValue := ` +- op: add + path: /spec/template/spec/containers/0/env/- + value: + name: CONTROL_PLANE_ENDPOINT + value: ` + controlPlaneEndpoint + + controlPlanePatch6902 := config.PatchJSON6902{ + Group: "apps", + Version: "v1", + Kind: "DaemonSet", + Patch: patchValue, + } + + patchedConfig, err := patch.KubeYAML(manifest, nil, []config.PatchJSON6902{controlPlanePatch6902}) + if err != nil { + return err + } + manifest = patchedConfig + } + + ctx.Logger.V(5).Infof("Using the following Kindnetd config:\n%s", manifest) + + // install the manifest + if err := node.Command( + "kubectl", "create", "--kubeconfig=/etc/kubernetes/admin.conf", + "-f", "-", + ).SetStdin(strings.NewReader(manifest)).Run(); err != nil { + return errors.Wrap(err, "failed to apply overlay network") + } + + // mark success + ctx.Status.End(true) + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage/storage.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage/storage.go new file mode 100644 index 000000000..fa9a095b0 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage/storage.go @@ -0,0 +1,99 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package installstorage implements the an action to install a default +// storageclass +package installstorage + +import ( + "bytes" + "strings" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" +) + +type action struct{} + +// NewAction returns a new action for installing storage +func NewAction() actions.Action { + return &action{} +} + +// Execute runs the action +func (a *action) Execute(ctx *actions.ActionContext) error { + ctx.Status.Start("Installing StorageClass 💾") + defer ctx.Status.End(false) + + allNodes, err := ctx.Nodes() + if err != nil { + return err + } + + // get the target node for this task + controlPlanes, err := nodeutils.ControlPlaneNodes(allNodes) + if err != nil { + return err + } + node := controlPlanes[0] // kind expects at least one always + + // add the default storage class + if err := addDefaultStorage(ctx.Logger, node); err != nil { + return errors.Wrap(err, "failed to add default storage class") + } + + // mark success + ctx.Status.End(true) + return nil +} + +// legacy default storage class +// we need this for e2es (StatefulSet) +// newer kind images ship a storage driver manifest +const defaultStorageManifest = `# host-path based default storage class +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + namespace: kube-system + name: standard + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: kubernetes.io/host-path` + +func addDefaultStorage(logger log.Logger, controlPlane nodes.Node) error { + // start with fallback default, and then try to get the newer kind node + // storage manifest if present + manifest := defaultStorageManifest + var raw bytes.Buffer + if err := controlPlane.Command("cat", 
"/kind/manifests/default-storage.yaml").SetStdout(&raw).Run(); err != nil { + logger.Warn("Could not read storage manifest, falling back on old k8s.io/host-path default ...") + } else { + manifest = raw.String() + } + + // apply the manifest + in := strings.NewReader(manifest) + cmd := controlPlane.Command( + "kubectl", + "--kubeconfig=/etc/kubernetes/admin.conf", "apply", "-f", "-", + ) + cmd.SetStdin(in) + return cmd.Run() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit/init.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit/init.go new file mode 100644 index 000000000..cc587940e --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit/init.go @@ -0,0 +1,148 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kubeadminit implements the kubeadm init action +package kubeadminit + +import ( + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/version" +) + +// kubeadmInitAction implements action for executing the kubeadm init +// and a set of default post init operations like e.g. install the +// CNI network plugin. 
+type action struct { + skipKubeProxy bool +} + +// NewAction returns a new action for kubeadm init +func NewAction(cfg *config.Cluster) actions.Action { + return &action{skipKubeProxy: cfg.Networking.KubeProxyMode == config.NoneProxyMode} +} + +// Execute runs the action +func (a *action) Execute(ctx *actions.ActionContext) error { + ctx.Status.Start("Starting control-plane 🕹️") + defer ctx.Status.End(false) + + allNodes, err := ctx.Nodes() + if err != nil { + return err + } + + // get the target node for this task + // TODO: eliminate the concept of bootstrapcontrolplane node entirely + // outside this method + node, err := nodeutils.BootstrapControlPlaneNode(allNodes) + if err != nil { + return err + } + + // skip preflight checks, as these have undesirable side effects + // and don't tell us much. requires kubeadm 1.13+ + skipPhases := "preflight" + if a.skipKubeProxy { + skipPhases += ",addon/kube-proxy" + } + + // run kubeadm + cmd := node.Command( + // init because this is the control plane node + "kubeadm", "init", + "--skip-phases="+skipPhases, + // specify our generated config file + "--config=/kind/kubeadm.conf", + "--skip-token-print", + // increase verbosity for debugging + "--v=6", + ) + lines, err := exec.CombinedOutputLines(cmd) + ctx.Logger.V(3).Info(strings.Join(lines, "\n")) + if err != nil { + return errors.Wrap(err, "failed to init node with kubeadm") + } + + // copy some files to the other control plane nodes + otherControlPlanes, err := nodeutils.SecondaryControlPlaneNodes(allNodes) + if err != nil { + return err + } + for _, otherNode := range otherControlPlanes { + for _, file := range []string{ + // copy over admin config so we can use any control plane to get it later + "/etc/kubernetes/admin.conf", + // copy over certs + "/etc/kubernetes/pki/ca.crt", "/etc/kubernetes/pki/ca.key", + "/etc/kubernetes/pki/front-proxy-ca.crt", "/etc/kubernetes/pki/front-proxy-ca.key", + "/etc/kubernetes/pki/sa.pub", "/etc/kubernetes/pki/sa.key", + // TODO: 
if we gain external etcd support these will be + // handled differently + "/etc/kubernetes/pki/etcd/ca.crt", "/etc/kubernetes/pki/etcd/ca.key", + } { + if err := nodeutils.CopyNodeToNode(node, otherNode, file); err != nil { + return errors.Wrap(err, "failed to copy admin kubeconfig") + } + } + } + + // if we are only provisioning one node, remove the control plane taint + // https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#master-isolation + if len(allNodes) == 1 { + // TODO: Once kubeadm 1.23 is no longer supported remove the <1.24 handling. + // TODO: Once kubeadm 1.24 is no longer supported remove the <1.25 handling. + // https://github.com/kubernetes-sigs/kind/issues/1699 + rawVersion, err := nodeutils.KubeVersion(node) + if err != nil { + return errors.Wrap(err, "failed to get Kubernetes version from node") + } + kubeVersion, err := version.ParseSemantic(rawVersion) + if err != nil { + return errors.Wrap(err, "could not parse Kubernetes version") + } + var taints []string + if kubeVersion.LessThan(version.MustParseSemantic("v1.24.0-alpha.1.592+370031cadac624")) { + // for versions older than 1.24 prerelease remove only the old taint + taints = []string{"node-role.kubernetes.io/master-"} + } else if kubeVersion.LessThan(version.MustParseSemantic("v1.25.0-alpha.0.557+84c8afeba39ec9")) { + // for versions between 1.24 and 1.25 prerelease remove both the old and new taint + taints = []string{"node-role.kubernetes.io/control-plane-", "node-role.kubernetes.io/master-"} + } else { + // for any newer version only remove the new taint + taints = []string{"node-role.kubernetes.io/control-plane-"} + } + taintArgs := []string{"--kubeconfig=/etc/kubernetes/admin.conf", "taint", "nodes", "--all"} + taintArgs = append(taintArgs, taints...) 
+ + if err := node.Command( + "kubectl", taintArgs..., + ).Run(); err != nil { + return errors.Wrap(err, "failed to remove control plane taint") + } + } + + // mark success + ctx.Status.End(true) + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin/join.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin/join.go new file mode 100644 index 000000000..fbd33555d --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin/join.go @@ -0,0 +1,139 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kubeadmjoin implements the kubeadm join action +package kubeadmjoin + +import ( + "strings" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" +) + +// Action implements action for creating the kubeadm join +// and deploying it on the bootstrap control-plane node. 
+type Action struct{} + +// NewAction returns a new action for creating the kubeadm jion +func NewAction() actions.Action { + return &Action{} +} + +// Execute runs the action +func (a *Action) Execute(ctx *actions.ActionContext) error { + allNodes, err := ctx.Nodes() + if err != nil { + return err + } + + // join secondary control plane nodes if any + secondaryControlPlanes, err := nodeutils.SecondaryControlPlaneNodes(allNodes) + if err != nil { + return err + } + if len(secondaryControlPlanes) > 0 { + if err := joinSecondaryControlPlanes(ctx, secondaryControlPlanes); err != nil { + return err + } + } + + // then join worker nodes if any + workers, err := nodeutils.SelectNodesByRole(allNodes, constants.WorkerNodeRoleValue) + if err != nil { + return err + } + if len(workers) > 0 { + if err := joinWorkers(ctx, workers); err != nil { + return err + } + } + + return nil +} + +func joinSecondaryControlPlanes( + ctx *actions.ActionContext, + secondaryControlPlanes []nodes.Node, +) error { + ctx.Status.Start("Joining more control-plane nodes 🎮") + defer ctx.Status.End(false) + + // TODO(bentheelder): it's too bad we can't do this concurrently + // (this is not safe currently) + for _, node := range secondaryControlPlanes { + node := node // capture loop variable + if err := runKubeadmJoin(ctx.Logger, node); err != nil { + return err + } + } + + ctx.Status.End(true) + return nil +} + +func joinWorkers( + ctx *actions.ActionContext, + workers []nodes.Node, +) error { + ctx.Status.Start("Joining worker nodes 🚜") + defer ctx.Status.End(false) + + // create the workers concurrently + fns := []func() error{} + for _, node := range workers { + node := node // capture loop variable + fns = append(fns, func() error { + return runKubeadmJoin(ctx.Logger, node) + }) + } + if err := errors.UntilErrorConcurrent(fns); err != nil { + return err + } + + ctx.Status.End(true) + return nil +} + +// runKubeadmJoin executes kubeadm join command +func runKubeadmJoin(logger log.Logger, node 
nodes.Node) error { + // run kubeadm join + // TODO(bentheelder): this should be using the config file + cmd := node.Command( + "kubeadm", "join", + // the join command uses the config file generated in a well known location + "--config", "/kind/kubeadm.conf", + // skip preflight checks, as these have undesirable side effects + // and don't tell us much. requires kubeadm 1.13+ + "--skip-phases=preflight", + // increase verbosity for debugging + "--v=6", + ) + lines, err := exec.CombinedOutputLines(cmd) + logger.V(3).Info(strings.Join(lines, "\n")) + if err != nil { + return errors.Wrap(err, "failed to join node with kubeadm") + } + + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer/loadbalancer.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer/loadbalancer.go new file mode 100644 index 000000000..bcc2fd107 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer/loadbalancer.go @@ -0,0 +1,100 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package loadbalancer implements the load balancer configuration action +package loadbalancer + +import ( + "fmt" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/internal/apis/config" + + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" + "sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" +) + +// Action implements and action for configuring and starting the +// external load balancer in front of the control-plane nodes. +type Action struct{} + +// NewAction returns a new Action for configuring the load balancer +func NewAction() actions.Action { + return &Action{} +} + +// Execute runs the action +func (a *Action) Execute(ctx *actions.ActionContext) error { + allNodes, err := ctx.Nodes() + if err != nil { + return err + } + + // identify external load balancer node + loadBalancerNode, err := nodeutils.ExternalLoadBalancerNode(allNodes) + if err != nil { + return err + } + + // if there's no loadbalancer we're done + if loadBalancerNode == nil { + return nil + } + + // otherwise notify the user + ctx.Status.Start("Configuring the external load balancer ⚖️") + defer ctx.Status.End(false) + + // collect info about the existing controlplane nodes + var backendServers = map[string]string{} + controlPlaneNodes, err := nodeutils.SelectNodesByRole( + allNodes, + constants.ControlPlaneNodeRoleValue, + ) + if err != nil { + return err + } + for _, n := range controlPlaneNodes { + backendServers[n.String()] = fmt.Sprintf("%s:%d", n.String(), common.APIServerInternalPort) + } + + // create loadbalancer config data + loadbalancerConfig, err := loadbalancer.Config(&loadbalancer.ConfigData{ + ControlPlanePort: common.APIServerInternalPort, + BackendServers: backendServers, + IPv6: ctx.Config.Networking.IPFamily == config.IPv6Family, + }) + if err != nil { + return errors.Wrap(err, "failed to generate 
loadbalancer config data") + } + + // create loadbalancer config on the node + if err := nodeutils.WriteFile(loadBalancerNode, loadbalancer.ConfigPath, loadbalancerConfig); err != nil { + // TODO: logging here + return errors.Wrap(err, "failed to copy loadbalancer config to node") + } + + // reload the config. haproxy will reload on SIGHUP + if err := loadBalancerNode.Command("kill", "-s", "HUP", "1").Run(); err != nil { + return errors.Wrap(err, "failed to reload loadbalancer") + } + + ctx.Status.End(true) + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready/waitforready.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready/waitforready.go new file mode 100644 index 000000000..d106178b2 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready/waitforready.go @@ -0,0 +1,147 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package waitforready implements the wait for ready action +package waitforready + +import ( + "fmt" + "strings" + "time" + + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/internal/version" +) + +// Action implements an action for waiting for the cluster to be ready +type Action struct { + waitTime time.Duration +} + +// NewAction returns a new action for waiting for the cluster to be ready +func NewAction(waitTime time.Duration) actions.Action { + return &Action{ + waitTime: waitTime, + } +} + +// Execute runs the action +func (a *Action) Execute(ctx *actions.ActionContext) error { + // skip entirely if the wait time is 0 + if a.waitTime == time.Duration(0) { + return nil + } + ctx.Status.Start( + fmt.Sprintf( + "Waiting ≤ %s for control-plane = Ready ⏳", + formatDuration(a.waitTime), + ), + ) + + allNodes, err := ctx.Nodes() + if err != nil { + return err + } + // get a control plane node to use to check cluster status + controlPlanes, err := nodeutils.ControlPlaneNodes(allNodes) + if err != nil { + return err + } + node := controlPlanes[0] // kind expects at least one always + + // Wait for the nodes to reach Ready status. + startTime := time.Now() + + // TODO: Remove the below handling once kubeadm 1.23 is no longer supported. 
+ // https://github.com/kubernetes-sigs/kind/issues/1699 + rawVersion, err := nodeutils.KubeVersion(node) + if err != nil { + return errors.Wrap(err, "failed to get Kubernetes version from node") + } + kubeVersion, err := version.ParseSemantic(rawVersion) + if err != nil { + return errors.Wrap(err, "could not parse Kubernetes version") + } + selectorLabel := "node-role.kubernetes.io/control-plane" + if kubeVersion.LessThan(version.MustParseSemantic("v1.24.0-alpha.1.591+a3d5e5598290df")) { + selectorLabel = "node-role.kubernetes.io/master" + } + + isReady := waitForReady(node, startTime.Add(a.waitTime), selectorLabel) + if !isReady { + ctx.Status.End(false) + ctx.Logger.V(0).Info(" • WARNING: Timed out waiting for Ready ⚠️") + return nil + } + + // mark success + ctx.Status.End(true) + ctx.Logger.V(0).Infof(" • Ready after %s 💚", formatDuration(time.Since(startTime))) + return nil +} + +// WaitForReady uses kubectl inside the "node" container to check if the +// control plane nodes are "Ready". +func waitForReady(node nodes.Node, until time.Time, selectorLabel string) bool { + return tryUntil(until, func() bool { + cmd := node.Command( + "kubectl", + "--kubeconfig=/etc/kubernetes/admin.conf", + "get", + "nodes", + "--selector="+selectorLabel, + // When the node reaches status ready, the status field will be set + // to true. + "-o=jsonpath='{.items..status.conditions[-1:].status}'", + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return false + } + + // 'lines' will return the status of all nodes labeled as master. For + // example, if we have three control plane nodes, and all are ready, + // then the status will have the following format: `True True True'. + status := strings.Fields(lines[0]) + for _, s := range status { + // Check node status. If node is ready then this will be 'True', + // 'False' or 'Unknown' otherwise. 
+ if !strings.Contains(s, "True") { + return false + } + } + return true + }) +} + +// helper that calls `try()“ in a loop until the deadline `until` +// has passed or `try()`returns true, returns whether try ever returned true +func tryUntil(until time.Time, try func() bool) bool { + for until.After(time.Now()) { + if try() { + return true + } + } + return false +} + +func formatDuration(duration time.Duration) string { + return duration.Round(time.Second).String() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/create.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/create.go new file mode 100644 index 000000000..351ba6c75 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/create.go @@ -0,0 +1,257 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package create + +import ( + "fmt" + "math/rand" + "time" + + "github.com/alessio/shellescape" + + "sigs.k8s.io/kind/pkg/cluster/internal/delete" + "sigs.k8s.io/kind/pkg/cluster/internal/providers" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/apis/config/encoding" + "sigs.k8s.io/kind/pkg/internal/cli" + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" + configaction "sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config" + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni" + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage" + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit" + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin" + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer" + "sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready" + "sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig" +) + +const ( + // Typical host name max limit is 64 characters (https://linux.die.net/man/2/sethostname) + // We append -control-plane (14 characters) to the cluster name on the control plane container + clusterNameMax = 50 +) + +// ClusterOptions holds cluster creation options +type ClusterOptions struct { + Config *config.Cluster + NameOverride string // overrides config.Name + // NodeImage overrides the nodes' images in Config if non-zero + NodeImage string + Retain bool + WaitForReady time.Duration + KubeconfigPath string + // see https://github.com/kubernetes-sigs/kind/issues/324 + StopBeforeSettingUpKubernetes bool // if false kind should setup kubernetes after creating nodes + // Options to control output + DisplayUsage bool + DisplaySalutation bool +} + +// Cluster creates a cluster +func Cluster(logger log.Logger, p providers.Provider, opts *ClusterOptions) error { + // validate provider first + if err := validateProvider(p); err != nil { + return err + } + + // 
default / process options (namely config) + if err := fixupOptions(opts); err != nil { + return err + } + + // Check if the cluster name already exists + if err := alreadyExists(p, opts.Config.Name); err != nil { + return err + } + + // warn if cluster name might typically be too long + if len(opts.Config.Name) > clusterNameMax { + logger.Warnf("cluster name %q is probably too long, this might not work properly on some systems", opts.Config.Name) + } + + // then validate + if err := opts.Config.Validate(); err != nil { + return err + } + + // setup a status object to show progress to the user + status := cli.StatusForLogger(logger) + + // we're going to start creating now, tell the user + logger.V(0).Infof("Creating cluster %q ...\n", opts.Config.Name) + + // Create node containers implementing defined config Nodes + if err := p.Provision(status, opts.Config); err != nil { + // In case of errors nodes are deleted (except if retain is explicitly set) + if !opts.Retain { + _ = delete.Cluster(logger, p, opts.Config.Name, opts.KubeconfigPath) + } + return err + } + + // TODO(bentheelder): make this controllable from the command line? 
+ actionsToRun := []actions.Action{ + loadbalancer.NewAction(), // setup external loadbalancer + configaction.NewAction(), // setup kubeadm config + } + if !opts.StopBeforeSettingUpKubernetes { + actionsToRun = append(actionsToRun, + kubeadminit.NewAction(opts.Config), // run kubeadm init + ) + // this step might be skipped, but is next after init + if !opts.Config.Networking.DisableDefaultCNI { + actionsToRun = append(actionsToRun, + installcni.NewAction(), // install CNI + ) + } + // add remaining steps + actionsToRun = append(actionsToRun, + installstorage.NewAction(), // install StorageClass + kubeadmjoin.NewAction(), // run kubeadm join + waitforready.NewAction(opts.WaitForReady), // wait for cluster readiness + ) + } + + // run all actions + actionsContext := actions.NewActionContext(logger, status, p, opts.Config) + for _, action := range actionsToRun { + if err := action.Execute(actionsContext); err != nil { + if !opts.Retain { + _ = delete.Cluster(logger, p, opts.Config.Name, opts.KubeconfigPath) + } + return err + } + } + + // skip the rest if we're not setting up kubernetes + if opts.StopBeforeSettingUpKubernetes { + return nil + } + + // try exporting kubeconfig with backoff for locking failures + // TODO: factor out into a public errors API w/ backoff handling? 
+ // for now this is easier than coming up with a good API + var err error + for _, b := range []time.Duration{0, time.Millisecond, time.Millisecond * 50, time.Millisecond * 100} { + time.Sleep(b) + if err = kubeconfig.Export(p, opts.Config.Name, opts.KubeconfigPath, true); err == nil { + break + } + } + if err != nil { + return err + } + + // optionally display usage + if opts.DisplayUsage { + logUsage(logger, opts.Config.Name, opts.KubeconfigPath) + } + // optionally give the user a friendly salutation + if opts.DisplaySalutation { + logger.V(0).Info("") + logSalutation(logger) + } + return nil +} + +// alreadyExists returns an error if the cluster name already exists +// or if we had an error checking +func alreadyExists(p providers.Provider, name string) error { + n, err := p.ListNodes(name) + if err != nil { + return err + } + if len(n) != 0 { + return errors.Errorf("node(s) already exist for a cluster with the name %q", name) + } + return nil +} + +func logUsage(logger log.Logger, name, explicitKubeconfigPath string) { + // construct a sample command for interacting with the cluster + kctx := kubeconfig.ContextForCluster(name) + sampleCommand := fmt.Sprintf("kubectl cluster-info --context %s", kctx) + if explicitKubeconfigPath != "" { + // explicit path, include this + sampleCommand += " --kubeconfig " + shellescape.Quote(explicitKubeconfigPath) + } + logger.V(0).Infof(`Set kubectl context to "%s"`, kctx) + logger.V(0).Infof("You can now use your cluster with:\n\n" + sampleCommand) +} + +func logSalutation(logger log.Logger) { + salutations := []string{ + "Have a nice day! 👋", + "Thanks for using kind! 😊", + "Not sure what to do next? 😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/", + "Have a question, bug, or feature request? Let us know! 
https://kind.sigs.k8s.io/#community 🙂", + } + r := rand.New(rand.NewSource(time.Now().UTC().UnixNano())) + s := salutations[r.Intn(len(salutations))] + logger.V(0).Info(s) +} + +func fixupOptions(opts *ClusterOptions) error { + // do post processing for options + // first ensure we at least have a default cluster config + if opts.Config == nil { + cfg, err := encoding.Load("") + if err != nil { + return err + } + opts.Config = cfg + } + + if opts.NameOverride != "" { + opts.Config.Name = opts.NameOverride + } + + // if NodeImage was set, override the image on all nodes + if opts.NodeImage != "" { + // Apply image override to all the Nodes defined in Config + // TODO(fabrizio pandini): this should be reconsidered when implementing + // https://github.com/kubernetes-sigs/kind/issues/133 + for i := range opts.Config.Nodes { + opts.Config.Nodes[i].Image = opts.NodeImage + } + } + + // default config fields (important for usage as a library, where the config + // may be constructed in memory rather than from disk) + config.SetDefaultsCluster(opts.Config) + + return nil +} + +func validateProvider(p providers.Provider) error { + info, err := p.Info() + if err != nil { + return err + } + if info.Rootless { + if !info.Cgroup2 { + return errors.New("running kind with rootless provider requires cgroup v2, see https://kind.sigs.k8s.io/docs/user/rootless/") + } + if !info.SupportsMemoryLimit || !info.SupportsPidsLimit || !info.SupportsCPUShares { + return errors.New("running kind with rootless provider requires setting systemd property \"Delegate=yes\", see https://kind.sigs.k8s.io/docs/user/rootless/") + } + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/delete/delete.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/delete/delete.go new file mode 100644 index 000000000..1a5e2d15c --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/delete/delete.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package delete + +import ( + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig" + "sigs.k8s.io/kind/pkg/cluster/internal/providers" +) + +// Cluster deletes the cluster identified by ctx +// explicitKubeconfigPath is --kubeconfig, following the rules from +// https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands +func Cluster(logger log.Logger, p providers.Provider, name, explicitKubeconfigPath string) error { + n, err := p.ListNodes(name) + if err != nil { + return errors.Wrap(err, "error listing nodes") + } + + kerr := kubeconfig.Remove(name, explicitKubeconfigPath) + if kerr != nil { + logger.Errorf("failed to update kubeconfig: %v", kerr) + } + + if len(n) > 0 { + err = p.DeleteNodes(n) + if err != nil { + return err + } + logger.V(0).Infof("Deleted nodes: %q", n) + } + + if kerr != nil { + return kerr + } + + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go new file mode 100644 index 000000000..6aa175819 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go @@ -0,0 +1,522 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeadm + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/google/safetext/yamltemplate" + + "sigs.k8s.io/kind/pkg/errors" + + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/version" +) + +// ConfigData is supplied to the kubeadm config template, with values populated +// by the cluster package +type ConfigData struct { + ClusterName string + KubernetesVersion string + // The ControlPlaneEndpoint, that is the address of the external loadbalancer + // if defined or the bootstrap node + ControlPlaneEndpoint string + // The Local API Server port + APIBindPort int + // The API server external listen IP (which we will port forward) + APIServerAddress string + + // this should really be used for the --provider-id flag + // ideally cluster config should not depend on the node backend otherwise ... 
+ NodeProvider string + + // ControlPlane flag specifies the node belongs to the control plane + ControlPlane bool + // The IP address or comma separated list IP addresses of of the node + NodeAddress string + // The name for the node (not the address) + NodeName string + + // The Token for TLS bootstrap + Token string + + // KubeProxyMode defines the kube-proxy mode between iptables or ipvs + KubeProxyMode string + // The subnet used for pods + PodSubnet string + // The subnet used for services + ServiceSubnet string + + // Kubernetes FeatureGates + FeatureGates map[string]bool + + // Kubernetes API Server RuntimeConfig + RuntimeConfig map[string]string + + // IPFamily of the cluster, it can be IPv4, IPv6 or DualStack + IPFamily config.ClusterIPFamily + + // Labels are the labels, in the format "key1=val1,key2=val2", with which the respective node will be labeled + NodeLabels string + + // RootlessProvider is true if kind is running with rootless mode + RootlessProvider bool + + // DisableLocalStorageCapacityIsolation is typically set true based on RootlessProvider + // based on the Kubernetes version, if true kubelet localStorageCapacityIsolation is set false + DisableLocalStorageCapacityIsolation bool + + // DerivedConfigData contains fields computed from the other fields for use + // in the config templates and should only be populated by calling Derive() + DerivedConfigData +} + +// DerivedConfigData fields are automatically derived by +// ConfigData.Derive if they are not specified / zero valued +type DerivedConfigData struct { + // AdvertiseAddress is the first address in NodeAddress + AdvertiseAddress string + // DockerStableTag is automatically derived from KubernetesVersion + DockerStableTag string + // SortedFeatureGates allows us to iterate FeatureGates deterministically + SortedFeatureGates []FeatureGate + // FeatureGatesString is of the form `Foo=true,Baz=false` + FeatureGatesString string + // RuntimeConfigString is of the form `Foo=true,Baz=false` + 
RuntimeConfigString string + // KubeadmFeatureGates contains Kubeadm only feature gates + KubeadmFeatureGates map[string]bool + // IPv4 values take precedence over IPv6 by default, if true set IPv6 default values + IPv6 bool + // kubelet cgroup driver, based on kubernetes version + CgroupDriver string +} + +type FeatureGate struct { + Name string + Value bool +} + +// Derive automatically derives DockerStableTag if not specified +func (c *ConfigData) Derive() { + // default cgroup driver + // TODO: refactor and move all deriving logic to this method + c.CgroupDriver = "systemd" + + // get the first address to use it as the API advertised address + c.AdvertiseAddress = strings.Split(c.NodeAddress, ",")[0] + + if c.DockerStableTag == "" { + c.DockerStableTag = strings.Replace(c.KubernetesVersion, "+", "_", -1) + } + + // get the IP addresses family for defaulting components + c.IPv6 = c.IPFamily == config.IPv6Family + + // get sorted list of FeatureGate keys + featureGateKeys := make([]string, 0, len(c.FeatureGates)) + for k := range c.FeatureGates { + featureGateKeys = append(featureGateKeys, k) + } + sort.Strings(featureGateKeys) + + // create a sorted key=value,... string of FeatureGates + c.SortedFeatureGates = make([]FeatureGate, 0, len(c.FeatureGates)) + featureGates := make([]string, 0, len(c.FeatureGates)) + for _, k := range featureGateKeys { + v := c.FeatureGates[k] + featureGates = append(featureGates, fmt.Sprintf("%s=%t", k, v)) + c.SortedFeatureGates = append(c.SortedFeatureGates, FeatureGate{ + Name: k, + Value: v, + }) + } + c.FeatureGatesString = strings.Join(featureGates, ",") + + // create a sorted key=value,... 
string of RuntimeConfig + // first get sorted list of FeatureGate keys + runtimeConfigKeys := make([]string, 0, len(c.RuntimeConfig)) + for k := range c.RuntimeConfig { + runtimeConfigKeys = append(runtimeConfigKeys, k) + } + sort.Strings(runtimeConfigKeys) + // stringify + var runtimeConfig []string + for _, k := range runtimeConfigKeys { + v := c.RuntimeConfig[k] + // TODO: do we need to quote / escape these in the future? + // Currently runtime config is in practice booleans, no special characters + runtimeConfig = append(runtimeConfig, fmt.Sprintf("%s=%s", k, v)) + } + c.RuntimeConfigString = strings.Join(runtimeConfig, ",") +} + +// See docs for these APIs at: +// https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#pkg-subdirectories +// EG: +// https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 + +// ConfigTemplateBetaV2 is the kubeadm config template for API version v1beta2 +const ConfigTemplateBetaV2 = `# config generated by kind +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +metadata: + name: config +kubernetesVersion: {{.KubernetesVersion}} +clusterName: "{{.ClusterName}}" +{{ if .KubeadmFeatureGates}}featureGates: +{{ range $key, $value := .KubeadmFeatureGates }} + "{{ (StructuralData $key) }}": {{ $value }} +{{end}}{{end}} +controlPlaneEndpoint: "{{ .ControlPlaneEndpoint }}" +# on docker for mac we have to expose the api server via port forward, +# so we need to ensure the cert is valid for localhost so we can talk +# to the cluster after rewriting the kubeconfig to point to localhost +apiServer: + certSANs: [localhost, "{{.APIServerAddress}}"] + extraArgs: + "runtime-config": "{{ .RuntimeConfigString }}" +{{ if .FeatureGates }} + "feature-gates": "{{ .FeatureGatesString }}" +{{ end}} +controllerManager: + extraArgs: +{{ if .FeatureGates }} + "feature-gates": "{{ .FeatureGatesString }}" +{{ end }} + enable-hostpath-provisioner: "true" + # configure ipv6 default addresses for IPv6 clusters + {{ if 
.IPv6 -}} + bind-address: "::" + {{- end }} +scheduler: + extraArgs: +{{ if .FeatureGates }} + "feature-gates": "{{ .FeatureGatesString }}" +{{ end }} + # configure ipv6 default addresses for IPv6 clusters + {{ if .IPv6 -}} + bind-address: "::1" + {{- end }} +networking: + podSubnet: "{{ .PodSubnet }}" + serviceSubnet: "{{ .ServiceSubnet }}" +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +metadata: + name: config +# we use a well know token for TLS bootstrap +bootstrapTokens: +- token: "{{ .Token }}" +# we use a well know port for making the API server discoverable inside docker network. +# from the host machine such port will be accessible via a random local port instead. +localAPIEndpoint: + advertiseAddress: "{{ .AdvertiseAddress }}" + bindPort: {{.APIBindPort}} +nodeRegistration: + criSocket: "unix:///run/containerd/containerd.sock" + kubeletExtraArgs: + node-ip: "{{ .NodeAddress }}" + provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}" + node-labels: "{{ .NodeLabels }}" +--- +# no-op entry that exists solely so it can be patched +apiVersion: kubeadm.k8s.io/v1beta2 +kind: JoinConfiguration +metadata: + name: config +{{ if .ControlPlane -}} +controlPlane: + localAPIEndpoint: + advertiseAddress: "{{ .AdvertiseAddress }}" + bindPort: {{.APIBindPort}} +{{- end }} +nodeRegistration: + criSocket: "unix:///run/containerd/containerd.sock" + kubeletExtraArgs: + node-ip: "{{ .NodeAddress }}" + provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}" + node-labels: "{{ .NodeLabels }}" +discovery: + bootstrapToken: + apiServerEndpoint: "{{ .ControlPlaneEndpoint }}" + token: "{{ .Token }}" + unsafeSkipCAVerification: true +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +metadata: + name: config +cgroupDriver: {{ .CgroupDriver }} +cgroupRoot: /kubelet +failSwapOn: false +# configure ipv6 addresses in IPv6 mode +{{ if .IPv6 -}} +address: "::" +healthzBindAddress: "::" +{{- end }} +# disable disk 
resource management by default +# kubelet will see the host disk that the inner container runtime +# is ultimately backed by and attempt to recover disk space. we don't want that. +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +{{if .FeatureGates}}featureGates: +{{ range $index, $gate := .SortedFeatureGates }} + "{{ (StructuralData $gate.Name) }}": {{ $gate.Value }} +{{end}}{{end}} +{{if ne .KubeProxyMode "None"}} +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metadata: + name: config +mode: "{{ .KubeProxyMode }}" +{{if .FeatureGates}}featureGates: +{{ range $index, $gate := .SortedFeatureGates }} + "{{ (StructuralData $gate.Name) }}": {{ $gate.Value }} +{{end}}{{end}} +iptables: + minSyncPeriod: 1s +conntrack: +# Skip setting sysctl value "net.netfilter.nf_conntrack_max" +# It is a global variable that affects other namespaces + maxPerCore: 0 +{{if .RootlessProvider}} +# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established" + tcpEstablishedTimeout: 0s +# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close" + tcpCloseWaitTimeout: 0s +{{end}}{{end}} +` + +// ConfigTemplateBetaV3 is the kubeadm config template for API version v1beta3 +const ConfigTemplateBetaV3 = `# config generated by kind +apiVersion: kubeadm.k8s.io/v1beta3 +kind: ClusterConfiguration +metadata: + name: config +kubernetesVersion: {{.KubernetesVersion}} +clusterName: "{{.ClusterName}}" +{{ if .KubeadmFeatureGates}}featureGates: +{{ range $key, $value := .KubeadmFeatureGates }} + "{{ (StructuralData $key) }}": {{ $value }} +{{end}}{{end}} +controlPlaneEndpoint: "{{ .ControlPlaneEndpoint }}" +# on docker for mac we have to expose the api server via port forward, +# so we need to ensure the cert is valid for localhost so we can talk +# to the cluster after rewriting the kubeconfig to point to localhost +apiServer: + certSANs: [localhost, "{{.APIServerAddress}}"] + 
extraArgs: + "runtime-config": "{{ .RuntimeConfigString }}" +{{ if .FeatureGates }} + "feature-gates": "{{ .FeatureGatesString }}" +{{ end}} +controllerManager: + extraArgs: +{{ if .FeatureGates }} + "feature-gates": "{{ .FeatureGatesString }}" +{{ end }} + enable-hostpath-provisioner: "true" + # configure ipv6 default addresses for IPv6 clusters + {{ if .IPv6 -}} + bind-address: "::" + {{- end }} +scheduler: + extraArgs: +{{ if .FeatureGates }} + "feature-gates": "{{ .FeatureGatesString }}" +{{ end }} + # configure ipv6 default addresses for IPv6 clusters + {{ if .IPv6 -}} + bind-address: "::1" + {{- end }} +networking: + podSubnet: "{{ .PodSubnet }}" + serviceSubnet: "{{ .ServiceSubnet }}" +--- +apiVersion: kubeadm.k8s.io/v1beta3 +kind: InitConfiguration +metadata: + name: config +# we use a well know token for TLS bootstrap +bootstrapTokens: +- token: "{{ .Token }}" +# we use a well know port for making the API server discoverable inside docker network. +# from the host machine such port will be accessible via a random local port instead. 
+localAPIEndpoint: + advertiseAddress: "{{ .AdvertiseAddress }}" + bindPort: {{.APIBindPort}} +nodeRegistration: + criSocket: "unix:///run/containerd/containerd.sock" + kubeletExtraArgs: + node-ip: "{{ .NodeAddress }}" + provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}" + node-labels: "{{ .NodeLabels }}" +--- +# no-op entry that exists solely so it can be patched +apiVersion: kubeadm.k8s.io/v1beta3 +kind: JoinConfiguration +metadata: + name: config +{{ if .ControlPlane -}} +controlPlane: + localAPIEndpoint: + advertiseAddress: "{{ .AdvertiseAddress }}" + bindPort: {{.APIBindPort}} +{{- end }} +nodeRegistration: + criSocket: "unix:///run/containerd/containerd.sock" + kubeletExtraArgs: + node-ip: "{{ .NodeAddress }}" + provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}" + node-labels: "{{ .NodeLabels }}" +discovery: + bootstrapToken: + apiServerEndpoint: "{{ .ControlPlaneEndpoint }}" + token: "{{ .Token }}" + unsafeSkipCAVerification: true +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +metadata: + name: config +cgroupDriver: {{ .CgroupDriver }} +cgroupRoot: /kubelet +failSwapOn: false +# configure ipv6 addresses in IPv6 mode +{{ if .IPv6 -}} +address: "::" +healthzBindAddress: "::" +{{- end }} +# disable disk resource management by default +# kubelet will see the host disk that the inner container runtime +# is ultimately backed by and attempt to recover disk space. we don't want that. 
+imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" +{{if .FeatureGates}}featureGates: +{{ range $index, $gate := .SortedFeatureGates }} + "{{ (StructuralData $gate.Name) }}": {{ $gate.Value }} +{{end}}{{end}} +{{if .DisableLocalStorageCapacityIsolation}}localStorageCapacityIsolation: false{{end}} +{{if ne .KubeProxyMode "None"}} +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +kind: KubeProxyConfiguration +metadata: + name: config +mode: "{{ .KubeProxyMode }}" +{{if .FeatureGates}}featureGates: +{{ range $index, $gate := .SortedFeatureGates }} + "{{ (StructuralData $gate.Name) }}": {{ $gate.Value }} +{{end}}{{end}} +iptables: + minSyncPeriod: 1s +conntrack: +# Skip setting sysctl value "net.netfilter.nf_conntrack_max" +# It is a global variable that affects other namespaces + maxPerCore: 0 +{{if .RootlessProvider}} +# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established" + tcpEstablishedTimeout: 0s +# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close" + tcpCloseWaitTimeout: 0s +{{end}}{{end}} +` + +// Config returns a kubeadm config generated from config data, in particular +// the kubernetes version +func Config(data ConfigData) (config string, err error) { + ver, err := version.ParseGeneric(data.KubernetesVersion) + if err != nil { + return "", err + } + + // ensure featureGates is non-nil, as we may add entries + if data.FeatureGates == nil { + data.FeatureGates = make(map[string]bool) + } + + if data.RootlessProvider { + if ver.LessThan(version.MustParseSemantic("v1.22.0")) { + // rootless kind v0.12.x supports Kubernetes v1.22 with KubeletInUserNamespace gate. + // rootless kind v0.11.x supports older Kubernetes with fake procfs. 
+ return "", errors.Errorf("version %q is not compatible with rootless provider (hint: kind v0.11.x may work with this version)", ver) + } + data.FeatureGates["KubeletInUserNamespace"] = true + + // For avoiding err="failed to get rootfs info: failed to get device for dir \"/var/lib/kubelet\": could not find device with major: 0, minor: 41 in cached partitions map" + // https://github.com/kubernetes-sigs/kind/issues/2524 + if ver.LessThan(version.MustParseSemantic("v1.25.0-alpha.3.440+0064010cddfa00")) { + // this feature gate was removed in v1.25 and replaced by an opt-out to disable + data.FeatureGates["LocalStorageCapacityIsolation"] = false + } else { + // added in v1.25 https://github.com/kubernetes/kubernetes/pull/111513 + data.DisableLocalStorageCapacityIsolation = true + } + } + + // assume the latest API version, then fallback if the k8s version is too low + templateSource := ConfigTemplateBetaV3 + if ver.LessThan(version.MustParseSemantic("v1.23.0")) { + templateSource = ConfigTemplateBetaV2 + } + + t, err := yamltemplate.New("kubeadm-config").Parse(templateSource) + if err != nil { + return "", errors.Wrap(err, "failed to parse config template") + } + + // derive any automatic fields if not supplied + data.Derive() + + // Kubeadm has its own feature-gate for dual stack + // we need to enable it for Kubernetes version 1.20 only + // dual-stack is only supported in 1.20+ + // TODO: remove this when 1.20 is EOL or we no longer support + // dual-stack for 1.20 in KIND + if ver.LessThan(version.MustParseSemantic("v1.21.0")) && + ver.AtLeast(version.MustParseSemantic("v1.20.0")) { + data.KubeadmFeatureGates = make(map[string]bool) + data.KubeadmFeatureGates["IPv6DualStack"] = true + } + + // before 1.24 kind uses cgroupfs + // after 1.24 kind uses systemd starting in kind v0.13.0 + // before kind v0.13.0 kubernetes 1.24 wasn't released yet + if ver.LessThan(version.MustParseSemantic("v1.24.0")) { + data.CgroupDriver = "cgroupfs" + } + + // execute the template 
+ var buff bytes.Buffer + err = t.Execute(&buff, data) + if err != nil { + return "", errors.Wrap(err, "error executing config template") + } + return buff.String(), nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/const.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/const.go new file mode 100644 index 000000000..f8a0e0df2 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/const.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeadm + +// Token defines a dummy, well known token for automating TLS bootstrap process +const Token = "abcdef.0123456789abcdef" + +// ObjectName is the name every generated object will have +// I.E. `metadata:\nname: config` +const ObjectName = "config" diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/doc.go new file mode 100644 index 000000000..f023db7df --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kubeadm contains kubeadm related constants and configuration +package kubeadm diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/encode.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/encode.go new file mode 100644 index 000000000..336212eff --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/encode.go @@ -0,0 +1,64 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubeconfig + +import ( + "bytes" + + yaml "gopkg.in/yaml.v3" + kubeyaml "sigs.k8s.io/yaml" + + "sigs.k8s.io/kind/pkg/errors" +) + +// Encode encodes the cfg to yaml +func Encode(cfg *Config) ([]byte, error) { + // NOTE: kubernetes's yaml library doesn't handle inline fields very well + // so we're not using that to marshal + encoded, err := yaml.Marshal(cfg) + if err != nil { + return nil, errors.Wrap(err, "failed to encode KUBECONFIG") + } + + // normalize with kubernetes's yaml library + // this is not strictly necessary, but it ensures minimal diffs when + // modifying kubeconfig files, which is nice to have + encoded, err = normYaml(encoded) + if err != nil { + return nil, errors.Wrap(err, "failed to normalize KUBECONFIG encoding") + } + + return encoded, nil +} + +// normYaml round trips yaml bytes through sigs.k8s.io/yaml to normalize them +// versus other kubernetes ecosystem yaml output +func normYaml(y []byte) ([]byte, error) { + var unstructured interface{} + if err := kubeyaml.Unmarshal(y, &unstructured); err != nil { + return nil, err + } + encoded, err := kubeyaml.Marshal(&unstructured) + if err != nil { + return nil, err + } + // special case: don't write anything when empty + if bytes.Equal(encoded, []byte("{}\n")) { + return []byte{}, nil + } + return encoded, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/helpers.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/helpers.go new file mode 100644 index 000000000..b92393083 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/helpers.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "sigs.k8s.io/kind/pkg/errors" +) + +// KINDClusterKey identifies kind clusters in kubeconfig files +func KINDClusterKey(clusterName string) string { + return "kind-" + clusterName +} + +// checkKubeadmExpectations validates that a kubeadm created KUBECONFIG meets +// our expectations, namely on the number of entries +func checkKubeadmExpectations(cfg *Config) error { + if len(cfg.Clusters) != 1 { + return errors.Errorf("kubeadm KUBECONFIG should have one cluster, but read %d", len(cfg.Clusters)) + } + if len(cfg.Users) != 1 { + return errors.Errorf("kubeadm KUBECONFIG should have one user, but read %d", len(cfg.Users)) + } + if len(cfg.Contexts) != 1 { + return errors.Errorf("kubeadm KUBECONFIG should have one context, but read %d", len(cfg.Contexts)) + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/lock.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/lock.go new file mode 100644 index 000000000..41d1be27f --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/lock.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "os" + "path/filepath" +) + +// these are from +// https://github.com/kubernetes/client-go/blob/611184f7c43ae2d520727f01d49620c7ed33412d/tools/clientcmd/loader.go#L439-L440 + +func lockFile(filename string) error { + // Make sure the dir exists before we try to create a lock file. + dir := filepath.Dir(filename) + if _, err := os.Stat(dir); os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return err + } + } + f, err := os.OpenFile(lockName(filename), os.O_CREATE|os.O_EXCL, 0) + if err != nil { + return err + } + f.Close() + return nil +} + +func unlockFile(filename string) error { + return os.Remove(lockName(filename)) +} + +func lockName(filename string) string { + return filename + ".lock" +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/merge.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/merge.go new file mode 100644 index 000000000..ec4a2164d --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/merge.go @@ -0,0 +1,111 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "os" + + "sigs.k8s.io/kind/pkg/errors" +) + +// WriteMerged writes a kind kubeconfig (see KINDFromRawKubeadm) into configPath +// merging with the existing contents if any and setting the current context to +// the kind config's current context. +func WriteMerged(kindConfig *Config, explicitConfigPath string) error { + // figure out what filepath we should use + configPath := pathForMerge(explicitConfigPath, os.Getenv) + + // lock config file the same as client-go + if err := lockFile(configPath); err != nil { + return errors.Wrap(err, "failed to lock config file") + } + defer func() { + _ = unlockFile(configPath) + }() + + // read in existing + existing, err := read(configPath) + if err != nil { + return errors.Wrap(err, "failed to get kubeconfig to merge") + } + + // merge with kind kubeconfig + if err := merge(existing, kindConfig); err != nil { + return err + } + + // write back out + return write(existing, configPath) +} + +// merge kind config into an existing config +func merge(existing, kind *Config) error { + // verify assumptions about kubeadm / kind kubeconfigs + if err := checkKubeadmExpectations(kind); err != nil { + return err + } + + // insert or append cluster entry + shouldAppend := true + for i := range existing.Clusters { + if existing.Clusters[i].Name == kind.Clusters[0].Name { + existing.Clusters[i] = kind.Clusters[0] + shouldAppend = false + } + } + if shouldAppend { + existing.Clusters = append(existing.Clusters, kind.Clusters[0]) + } + + // insert or append user entry + shouldAppend = true + for i := range existing.Users { + if existing.Users[i].Name == kind.Users[0].Name { + existing.Users[i] = kind.Users[0] + shouldAppend = false + } + } + if shouldAppend { + existing.Users = append(existing.Users, kind.Users[0]) + } + + // insert or append context entry + shouldAppend = true + for i := range 
existing.Contexts { + if existing.Contexts[i].Name == kind.Contexts[0].Name { + existing.Contexts[i] = kind.Contexts[0] + shouldAppend = false + } + } + if shouldAppend { + existing.Contexts = append(existing.Contexts, kind.Contexts[0]) + } + + // set the current context + existing.CurrentContext = kind.CurrentContext + + // TODO: We should not need this, but it allows broken clients that depend + // on apiVersion and kind to work. Notably the upstream javascript client. + // See: https://github.com/kubernetes-sigs/kind/issues/1242 + if len(existing.OtherFields) == 0 { + // TODO: Should we be deep-copying? for now we don't need to + // and doing so would be a pain (re and de-serialize maybe?) :shrug: + existing.OtherFields = kind.OtherFields + } + + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/paths.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/paths.go new file mode 100644 index 000000000..ab55792c4 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/paths.go @@ -0,0 +1,167 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubeconfig + +import ( + "os" + "path" + "path/filepath" + "runtime" + + "sigs.k8s.io/kind/pkg/internal/sets" +) + +const kubeconfigEnv = "KUBECONFIG" + +/* +paths returns the list of paths to be considered for kubeconfig files +where explicitPath is the value of --kubeconfig + +# Logic based on kubectl + +https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands + +- If the --kubeconfig flag is set, then only that file is loaded. The flag may only be set once and no merging takes place. + +- If $KUBECONFIG environment variable is set, then it is used as a list of paths (normal path delimiting rules for your system). These paths are merged. When a value is modified, it is modified in the file that defines the stanza. When a value is created, it is created in the first file that exists. - If no files in the chain exist, then it creates the last file in the list. + +- Otherwise, ${HOME}/.kube/config is used and no merging takes place. +*/ +func paths(explicitPath string, getEnv func(string) string) []string { + if explicitPath != "" { + return []string{explicitPath} + } + + paths := discardEmptyAndDuplicates( + filepath.SplitList(getEnv(kubeconfigEnv)), + ) + if len(paths) != 0 { + return paths + } + + return []string{path.Join(homeDir(runtime.GOOS, getEnv), ".kube", "config")} +} + +// pathForMerge returns the file that kubectl would merge into +func pathForMerge(explicitPath string, getEnv func(string) string) string { + // find the first file that exists + p := paths(explicitPath, getEnv) + if len(p) == 1 { + return p[0] + } + for _, filename := range p { + if fileExists(filename) { + return filename + } + } + // otherwise the last file + return p[len(p)-1] +} + +func fileExists(filename string) bool { + info, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + return !info.IsDir() +} + +func discardEmptyAndDuplicates(paths []string) []string { + seen := sets.NewString() + kept := 0 + for _, p := range paths { + if 
p != "" && !seen.Has(p) { + paths[kept] = p + kept++ + seen.Insert(p) + } + } + return paths[:kept] +} + +// homeDir returns the home directory for the current user. +// On Windows: +// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned. +// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned. +// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned. +// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned. +// NOTE this is from client-go. Rather than pull in client-go for this one +// standalone method, we have a fork here. +// https://github.com/kubernetes/client-go/blob/6d7018244d72350e2e8c4a19ccdbe4c8083a9143/util/homedir/homedir.go +// We've modified this to require injecting os.Getenv and runtime.GOOS as a dependencies for testing purposes +func homeDir(GOOS string, getEnv func(string) string) string { + if GOOS == "windows" { + home := getEnv("HOME") + homeDriveHomePath := "" + if homeDrive, homePath := getEnv("HOMEDRIVE"), getEnv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { + homeDriveHomePath = homeDrive + homePath + } + userProfile := getEnv("USERPROFILE") + + // Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file. + // %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility. 
+ for _, p := range []string{home, homeDriveHomePath, userProfile} { + if len(p) == 0 { + continue + } + if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil { + continue + } + return p + } + + firstSetPath := "" + firstExistingPath := "" + + // Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools + for _, p := range []string{home, userProfile, homeDriveHomePath} { + if len(p) == 0 { + continue + } + if len(firstSetPath) == 0 { + // remember the first path that is set + firstSetPath = p + } + info, err := os.Stat(p) + if err != nil { + continue + } + if len(firstExistingPath) == 0 { + // remember the first path that exists + firstExistingPath = p + } + if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 { + // return first path that is writeable + return p + } + } + + // If none are writeable, return first location that exists + if len(firstExistingPath) > 0 { + return firstExistingPath + } + + // If none exist, return first location that is set + if len(firstSetPath) > 0 { + return firstSetPath + } + + // We've got nothing + return "" + } + return getEnv("HOME") +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/read.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/read.go new file mode 100644 index 000000000..d483d106e --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/read.go @@ -0,0 +1,82 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "io" + "os" + + yaml "gopkg.in/yaml.v3" + + "sigs.k8s.io/kind/pkg/errors" +) + +// KINDFromRawKubeadm returns a kind kubeconfig derived from the raw kubeadm kubeconfig, +// the kind clusterName, and the server. +// server is ignored if unset. +func KINDFromRawKubeadm(rawKubeadmKubeConfig, clusterName, server string) (*Config, error) { + cfg := &Config{} + if err := yaml.Unmarshal([]byte(rawKubeadmKubeConfig), cfg); err != nil { + return nil, err + } + + // verify assumptions about kubeadm kubeconfigs + if err := checkKubeadmExpectations(cfg); err != nil { + return nil, err + } + + // compute unique kubeconfig key for this cluster + key := KINDClusterKey(clusterName) + + // use the unique key for all named references + cfg.Clusters[0].Name = key + cfg.Users[0].Name = key + cfg.Contexts[0].Name = key + cfg.Contexts[0].Context.User = key + cfg.Contexts[0].Context.Cluster = key + cfg.CurrentContext = key + + // patch server field if server was set + if server != "" { + cfg.Clusters[0].Cluster.Server = server + } + + return cfg, nil +} + +// read loads a KUBECONFIG file from configPath +func read(configPath string) (*Config, error) { + // try to open, return default if no such file + f, err := os.Open(configPath) + if os.IsNotExist(err) { + return &Config{}, nil + } else if err != nil { + return nil, errors.WithStack(err) + } + + // otherwise read in and deserialize + cfg := &Config{} + rawExisting, err := io.ReadAll(f) + if err != nil { + return nil, errors.WithStack(err) + } + if err := yaml.Unmarshal(rawExisting, cfg); err != nil { + return nil, errors.WithStack(err) + } + + return cfg, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/remove.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/remove.go new file mode 100644 index 000000000..e5fed9556 --- 
/dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/remove.go @@ -0,0 +1,111 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "os" + + "sigs.k8s.io/kind/pkg/errors" +) + +// RemoveKIND removes the kind cluster kindClusterName from the KUBECONFIG +// files at configPaths +func RemoveKIND(kindClusterName string, explicitPath string) error { + // remove kind from each if present + for _, configPath := range paths(explicitPath, os.Getenv) { + if err := func(configPath string) error { + // lock before modifying + if err := lockFile(configPath); err != nil { + return errors.Wrap(err, "failed to lock config file") + } + defer func(configPath string) { + _ = unlockFile(configPath) + }(configPath) + + // read in existing + existing, err := read(configPath) + if err != nil { + return errors.Wrap(err, "failed to read kubeconfig to remove KIND entry") + } + + // remove the kind cluster from the config + if remove(existing, kindClusterName) { + // write out the updated config if we modified anything + if err := write(existing, configPath); err != nil { + return err + } + } + + return nil + }(configPath); err != nil { + return err + } + } + return nil +} + +// remove drops kindClusterName entries from the cfg +func remove(cfg *Config, kindClusterName string) bool { + mutated := false + + // get kind cluster identifier + key := KINDClusterKey(kindClusterName) + + // filter out 
kind cluster from clusters + kept := 0 + for _, c := range cfg.Clusters { + if c.Name != key { + cfg.Clusters[kept] = c + kept++ + } else { + mutated = true + } + } + cfg.Clusters = cfg.Clusters[:kept] + + // filter out kind cluster from users + kept = 0 + for _, u := range cfg.Users { + if u.Name != key { + cfg.Users[kept] = u + kept++ + } else { + mutated = true + } + } + cfg.Users = cfg.Users[:kept] + + // filter out kind cluster from contexts + kept = 0 + for _, c := range cfg.Contexts { + if c.Name != key { + cfg.Contexts[kept] = c + kept++ + } else { + mutated = true + } + } + cfg.Contexts = cfg.Contexts[:kept] + + // unset current context if it points to this cluster + if cfg.CurrentContext == key { + cfg.CurrentContext = "" + mutated = true + } + + return mutated +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/types.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/types.go new file mode 100644 index 000000000..1da5df545 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/types.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubeconfig + +/* +NOTE: all of these types are based on the upstream v1 types from client-go +https://github.com/kubernetes/client-go/blob/0bdba2f9188006fc64057c2f6d82a0f9ee0ee422/tools/clientcmd/api/v1/types.go + +We've forked them to: +- remove types and fields kind does not need to inspect / modify +- generically support fields kind doesn't inspect / modify using yaml.v3 +- have clearer names (AuthInfo -> User) +*/ + +// Config represents a KUBECONFIG, with the fields kind is likely to use +// Other fields are handled as unstructured data purely read for writing back +// to disk via the OtherFields field +type Config struct { + // Clusters is a map of referenceable names to cluster configs + Clusters []NamedCluster `yaml:"clusters,omitempty"` + // Users is a map of referenceable names to user configs + Users []NamedUser `yaml:"users,omitempty"` + // Contexts is a map of referenceable names to context configs + Contexts []NamedContext `yaml:"contexts,omitempty"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `yaml:"current-context,omitempty"` + // OtherFields contains fields kind does not inspect or modify, these are + // read purely for writing back + OtherFields map[string]interface{} `yaml:",inline,omitempty"` +} + +// NamedCluster relates nicknames to cluster information +type NamedCluster struct { + // Name is the nickname for this Cluster + Name string `yaml:"name"` + // Cluster holds the cluster information + Cluster Cluster `yaml:"cluster"` +} + +// Cluster contains information about how to communicate with a kubernetes cluster +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). 
+ Server string `yaml:"server,omitempty"` + // OtherFields contains fields kind does not inspect or modify, these are + // read purely for writing back + OtherFields map[string]interface{} `yaml:",inline,omitempty"` +} + +// NamedUser relates nicknames to user information +type NamedUser struct { + // Name is the nickname for this User + Name string `yaml:"name"` + // User holds the user information + // We do not touch this and merely write it back + User map[string]interface{} `yaml:"user"` +} + +// NamedContext relates nicknames to context information +type NamedContext struct { + // Name is the nickname for this Context + Name string `yaml:"name"` + // Context holds the context information + Context Context `yaml:"context"` +} + +// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) +type Context struct { + // Cluster is the name of the cluster for this context + Cluster string `yaml:"cluster"` + // User is the name of the User for this context + User string `yaml:"user"` + // OtherFields contains fields kind does not inspect or modify, these are + // read purely for writing back + OtherFields map[string]interface{} `yaml:",inline,omitempty"` +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/write.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/write.go new file mode 100644 index 000000000..1e8de4ecb --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/write.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeconfig + +import ( + "os" + "path/filepath" + + "sigs.k8s.io/kind/pkg/errors" +) + +// write writes cfg to configPath +// it will ensure the directories in the path if necessary +func write(cfg *Config, configPath string) error { + encoded, err := Encode(cfg) + if err != nil { + return err + } + // NOTE: 0755 / 0600 are to match client-go + dir := filepath.Dir(configPath) + if _, err := os.Stat(dir); os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return errors.Wrap(err, "failed to create directory for KUBECONFIG") + } + } + if err := os.WriteFile(configPath, encoded, 0600); err != nil { + return errors.Wrap(err, "failed to write KUBECONFIG") + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/kubeconfig.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/kubeconfig.go new file mode 100644 index 000000000..c5e4c2ace --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/kubeconfig.go @@ -0,0 +1,105 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kubeconfig provides utilities kind uses internally to manage +// kind cluster kubeconfigs +package kubeconfig + +import ( + "bytes" + + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/errors" + + // this package has slightly more generic kubeconfig helpers + // and minimal dependencies on the rest of kind + "sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig" + "sigs.k8s.io/kind/pkg/cluster/internal/providers" +) + +// Export exports the kubeconfig given the cluster context and a path to write it to +// This will always be an external kubeconfig +func Export(p providers.Provider, name, explicitPath string, external bool) error { + cfg, err := get(p, name, external) + if err != nil { + return err + } + return kubeconfig.WriteMerged(cfg, explicitPath) +} + +// Remove removes clusterName from the kubeconfig paths detected based on +// either explicitPath being set or $KUBECONFIG or $HOME/.kube/config, following +// the rules set by kubectl +// clusterName must identify a kind cluster. +func Remove(clusterName, explicitPath string) error { + return kubeconfig.RemoveKIND(clusterName, explicitPath) +} + +// Get returns the kubeconfig for the cluster +// external controls if the internal IP address is used or the host endpoint +func Get(p providers.Provider, name string, external bool) (string, error) { + cfg, err := get(p, name, external) + if err != nil { + return "", err + } + b, err := kubeconfig.Encode(cfg) + if err != nil { + return "", err + } + return string(b), err +} + +// ContextForCluster returns the context name for a kind cluster based on +// its name. 
This key is used for all list entries of kind clusters +func ContextForCluster(kindClusterName string) string { + return kubeconfig.KINDClusterKey(kindClusterName) +} + +func get(p providers.Provider, name string, external bool) (*kubeconfig.Config, error) { + // find a control plane node to get the kubeadm config from + n, err := p.ListNodes(name) + if err != nil { + return nil, err + } + var buff bytes.Buffer + nodes, err := nodeutils.ControlPlaneNodes(n) + if err != nil { + return nil, err + } + if len(nodes) < 1 { + return nil, errors.Errorf("could not locate any control plane nodes for cluster named '%s'. "+ + "Use the --name option to select a different cluster", name) + } + node := nodes[0] + + // grab kubeconfig version from the node + if err := node.Command("cat", "/etc/kubernetes/admin.conf").SetStdout(&buff).Run(); err != nil { + return nil, errors.Wrap(err, "failed to get cluster internal kubeconfig") + } + + // if we're doing external we need to override the server endpoint + server := "" + if external { + endpoint, err := p.GetAPIServerEndpoint(name) + if err != nil { + return nil, err + } + server = "https://" + endpoint + } + + // actually encode + return kubeconfig.KINDFromRawKubeadm(buff.String(), name, server) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/config.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/config.go new file mode 100644 index 000000000..185565f66 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/config.go @@ -0,0 +1,85 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loadbalancer + +import ( + "bytes" + "text/template" + + "sigs.k8s.io/kind/pkg/errors" +) + +// ConfigData is supplied to the loadbalancer config template +type ConfigData struct { + ControlPlanePort int + BackendServers map[string]string + IPv6 bool +} + +// DefaultConfigTemplate is the loadbalancer config template +const DefaultConfigTemplate = `# generated by kind +global + log /dev/log local0 + log /dev/log local1 notice + daemon + # limit memory usage to approximately 18 MB + maxconn 100000 + +resolvers docker + nameserver dns 127.0.0.11:53 + +defaults + log global + mode tcp + option dontlognull + # TODO: tune these + timeout connect 5000 + timeout client 50000 + timeout server 50000 + # allow to boot despite dns don't resolve backends + default-server init-addr none + +frontend control-plane + bind *:{{ .ControlPlanePort }} + {{ if .IPv6 -}} + bind :::{{ .ControlPlanePort }}; + {{- end }} + default_backend kube-apiservers + +backend kube-apiservers + option httpchk GET /healthz + # TODO: we should be verifying (!) 
+ {{range $server, $address := .BackendServers}} + server {{ $server }} {{ $address }} check check-ssl verify none resolvers docker resolve-prefer {{ if $.IPv6 -}} ipv6 {{- else -}} ipv4 {{- end }} + {{- end}} +` + +// Config returns a kubeadm config generated from config data, in particular +// the kubernetes version +func Config(data *ConfigData) (config string, err error) { + t, err := template.New("loadbalancer-config").Parse(DefaultConfigTemplate) + if err != nil { + return "", errors.Wrap(err, "failed to parse config template") + } + // execute the template + var buff bytes.Buffer + err = t.Execute(&buff, data) + if err != nil { + return "", errors.Wrap(err, "error executing config template") + } + return buff.String(), nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/const.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/const.go new file mode 100644 index 000000000..3600b338b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/const.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package loadbalancer + +// Image defines the loadbalancer image:tag +const Image = "docker.io/kindest/haproxy:v20230606-42a2262b" + +// ConfigPath defines the path to the config file in the image +const ConfigPath = "/usr/local/etc/haproxy/haproxy.cfg" diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/doc.go new file mode 100644 index 000000000..6e53f388d --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package loadbalancer contains external loadbalancer related constants and configuration +package loadbalancer diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/doc.go new file mode 100644 index 000000000..000960370 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package logs contains tooling for obtaining cluster logs +package logs diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/logs.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/logs.go new file mode 100644 index 000000000..2dc5941ef --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/logs.go @@ -0,0 +1,105 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path" + "path/filepath" + + "github.com/alessio/shellescape" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/log" +) + +// DumpDir dumps the dir nodeDir on the node to the dir hostDir on the host +func DumpDir(logger log.Logger, node nodes.Node, nodeDir, hostDir string) (err error) { + cmd := node.Command( + "sh", "-c", + // Tar will exit 1 if a file changed during the archival. + // We don't care about this, so we're invoking it in a shell + // And masking out 1 as a return value. 
+ // Fatal errors will return exit code 2. + // http://man7.org/linux/man-pages/man1/tar.1.html#RETURN_VALUE + fmt.Sprintf( + `tar --hard-dereference -C %s -chf - . || (r=$?; [ $r -eq 1 ] || exit $r)`, + shellescape.Quote(path.Clean(nodeDir)+"/"), + ), + ) + + return exec.RunWithStdoutReader(cmd, func(outReader io.Reader) error { + if err := untar(logger, outReader, hostDir); err != nil { + return errors.Wrapf(err, "Untarring %q: %v", nodeDir, err) + } + return nil + }) +} + +// untar reads the tar file from r and writes it into dir. +func untar(logger log.Logger, r io.Reader, dir string) (err error) { + tr := tar.NewReader(r) + for { + f, err := tr.Next() + + switch { + case err == io.EOF: + // drain the reader, which may have trailing null bytes + // we don't want to leave the writer hanging + _, err := io.Copy(io.Discard, r) + return err + case err != nil: + return errors.Wrapf(err, "tar reading error: %v", err) + case f == nil: + continue + } + + rel := filepath.FromSlash(f.Name) + abs := filepath.Join(dir, rel) + + switch f.Typeflag { + case tar.TypeReg: + wf, err := os.OpenFile(abs, os.O_CREATE|os.O_RDWR, os.FileMode(f.Mode)) + if err != nil { + return err + } + n, err := io.Copy(wf, tr) + if closeErr := wf.Close(); closeErr != nil && err == nil { + err = closeErr + } + if err != nil { + return errors.Errorf("error writing to %s: %v", abs, err) + } + if n != f.Size { + return errors.Errorf("only wrote %d bytes to %s; expected %d", n, abs, f.Size) + } + case tar.TypeDir: + if _, err := os.Stat(abs); err != nil { + if err := os.MkdirAll(abs, 0755); err != nil { + return err + } + } + default: + logger.Warnf("tar file entry %s contained unsupported file type %v", f.Name, f.Typeflag) + } + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/cgroups.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/cgroups.go new file mode 100644 index 000000000..9ef3abd04 --- /dev/null +++ 
b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/cgroups.go @@ -0,0 +1,85 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "bufio" + "context" + "os" + "regexp" + "sync" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +var nodeReachedCgroupsReadyRegexp *regexp.Regexp +var nodeReachedCgroupsReadyRegexpCompileOnce sync.Once + +// NodeReachedCgroupsReadyRegexp returns a regexp for use with WaitUntilLogRegexpMatches +// +// This is used to avoid "ERROR: this script needs /sys/fs/cgroup/cgroup.procs to be empty (for writing the top-level cgroup.subtree_control)" +// See https://github.com/kubernetes-sigs/kind/issues/2409 +// +// This pattern matches either "detected cgroupv1" from the kind node image's entrypoint logs +// or "Multi-User System" target if is using cgroups v2, +// so that `docker exec` can be executed safely without breaking cgroup v2 hierarchy. +func NodeReachedCgroupsReadyRegexp() *regexp.Regexp { + nodeReachedCgroupsReadyRegexpCompileOnce.Do(func() { + // This is an approximation, see: https://github.com/kubernetes-sigs/kind/pull/2421 + nodeReachedCgroupsReadyRegexp = regexp.MustCompile("Reached target .*Multi-User System.*|detected cgroup v1") + }) + return nodeReachedCgroupsReadyRegexp +} + +// WaitUntilLogRegexpMatches waits until logCmd output produces a line matching re. 
+// It will use logCtx to determine if the logCmd deadline was exceeded for producing +// the most useful error message in failure cases, logCtx should be the context +// supplied to create logCmd with CommandContext +func WaitUntilLogRegexpMatches(logCtx context.Context, logCmd exec.Cmd, re *regexp.Regexp) error { + pr, pw, err := os.Pipe() + if err != nil { + return err + } + logCmd.SetStdout(pw) + logCmd.SetStderr(pw) + + defer pr.Close() + cmdErrC := make(chan error, 1) + go func() { + defer pw.Close() + cmdErrC <- logCmd.Run() + }() + + sc := bufio.NewScanner(pr) + for sc.Scan() { + line := sc.Text() + if re.MatchString(line) { + return nil + } + } + + // when we timeout the process will have been killed due to the timeout, which is not interesting + // in other cases if the command errored this may be a useful error + if ctxErr := logCtx.Err(); ctxErr != context.DeadlineExceeded { + if cmdErr := <-cmdErrC; cmdErr != nil { + return errors.Wrap(cmdErr, "failed to read logs") + } + } + // otherwise generic error + return errors.Errorf("could not find a log line that matches %q", re.String()) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/constants.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/constants.go new file mode 100644 index 000000000..131218f19 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/constants.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +// APIServerInternalPort defines the port where the control plane is listening +// _inside_ the node network +const APIServerInternalPort = 6443 diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/doc.go new file mode 100644 index 000000000..dd4311626 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliep. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package common contains common code for implementing providers +package common diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/getport.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/getport.go new file mode 100644 index 000000000..5c50da94c --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/getport.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliep. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "net" +) + +// PortOrGetFreePort is a helper that either returns the provided port +// if valid or returns a new free port on listenAddr +func PortOrGetFreePort(port int32, listenAddr string) (int32, error) { + // in the case of -1 we actually want to pass 0 to the backend to let it pick + if port == -1 { + return 0, nil + } + // in the case of 0 (unset) we want kind to pick one and supply it to the backend + if port == 0 { + return GetFreePort(listenAddr) + } + // otherwise keep the port + return port, nil +} + +// GetFreePort is a helper used to get a free TCP port on the host +func GetFreePort(listenAddr string) (int32, error) { + dummyListener, err := net.Listen("tcp", net.JoinHostPort(listenAddr, "0")) + if err != nil { + return 0, err + } + defer dummyListener.Close() + port := dummyListener.Addr().(*net.TCPAddr).Port + return int32(port), nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/images.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/images.go new file mode 100644 index 000000000..080a337f5 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/images.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliep. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/sets" +) + +// RequiredNodeImages returns the set of _node_ images specified by the config +// This does not include the loadbalancer image, and is only used to improve +// the UX by explicit pulling the node images prior to running +func RequiredNodeImages(cfg *config.Cluster) sets.String { + images := sets.NewString() + for _, node := range cfg.Nodes { + images.Insert(node.Image) + } + return images +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/logs.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/logs.go new file mode 100644 index 000000000..ab2b4c376 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/logs.go @@ -0,0 +1,59 @@ +package common + +import ( + "os" + "path/filepath" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// CollectLogs provides the common functionality +// to get various debug info from the node +func CollectLogs(n nodes.Node, dir string) error { + execToPathFn := func(cmd exec.Cmd, path string) func() error { + return func() error { + f, err := FileOnHost(filepath.Join(dir, path)) + if err != nil { + return err + } + defer f.Close() + return cmd.SetStdout(f).SetStderr(f).Run() + } + } + + return errors.AggregateConcurrent([]func() error{ + // record info about the node container + execToPathFn( + n.Command("cat", "/kind/version"), + "kubernetes-version.txt", + ), + execToPathFn( + 
n.Command("journalctl", "--no-pager"), + "journal.log", + ), + execToPathFn( + n.Command("journalctl", "--no-pager", "-u", "kubelet.service"), + "kubelet.log", + ), + execToPathFn( + n.Command("journalctl", "--no-pager", "-u", "containerd.service"), + "containerd.log", + ), + execToPathFn( + n.Command("crictl", "images"), + "images.log", + ), + }) +} + +// FileOnHost is a helper to create a file at path +// even if the parent directory doesn't exist +// in which case it will be created with ModePerm +func FileOnHost(path string) (*os.File, error) { + if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil { + return nil, err + } + return os.Create(path) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/namer.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/namer.go new file mode 100644 index 000000000..9abade8ce --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/namer.go @@ -0,0 +1,37 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliep. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package common + +import ( + "fmt" +) + +// MakeNodeNamer returns a func(role string)(nodeName string) +// used to name nodes based on their role and the clusterName +func MakeNodeNamer(clusterName string) func(string) string { + counter := make(map[string]int) + return func(role string) string { + count := 1 + suffix := "" + if v, ok := counter[role]; ok { + count += v + suffix = fmt.Sprintf("%d", count) + } + counter[role] = count + return fmt.Sprintf("%s-%s%s", clusterName, role, suffix) + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/proxy.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/proxy.go new file mode 100644 index 000000000..4e896b6e1 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/proxy.go @@ -0,0 +1,64 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package common + +import ( + "os" + "strings" + + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +const ( + // HTTPProxy is the HTTP_PROXY environment variable key + HTTPProxy = "HTTP_PROXY" + // HTTPSProxy is the HTTPS_PROXY environment variable key + HTTPSProxy = "HTTPS_PROXY" + // NOProxy is the NO_PROXY environment variable key + NOProxy = "NO_PROXY" +) + +// GetProxyEnvs returns a map of proxy environment variables to their values +// If proxy settings are set, NO_PROXY is modified to include the cluster subnets +func GetProxyEnvs(cfg *config.Cluster) map[string]string { + return getProxyEnvs(cfg, os.Getenv) +} + +func getProxyEnvs(cfg *config.Cluster, getEnv func(string) string) map[string]string { + envs := make(map[string]string) + for _, name := range []string{HTTPProxy, HTTPSProxy, NOProxy} { + val := getEnv(name) + if val == "" { + val = getEnv(strings.ToLower(name)) + } + if val != "" { + envs[name] = val + envs[strings.ToLower(name)] = val + } + } + // Specifically add the cluster subnets to NO_PROXY if we are using a proxy + if len(envs) > 0 { + noProxy := envs[NOProxy] + if noProxy != "" { + noProxy += "," + } + noProxy += cfg.Networking.ServiceSubnet + "," + cfg.Networking.PodSubnet + envs[NOProxy] = noProxy + envs[strings.ToLower(NOProxy)] = noProxy + } + return envs +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/OWNERS b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/OWNERS new file mode 100644 index 000000000..33e190320 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/OWNERS @@ -0,0 +1,2 @@ +labels: +- area/provider/docker diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/constants.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/constants.go new file mode 100644 index 000000000..40fd79d9d --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/constants.go @@ -0,0 +1,24 @@ +/* +Copyright 
2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package docker
+
+// clusterLabelKey is applied to each "node" docker container for identification
+const clusterLabelKey = "io.x-k8s.kind.cluster"
+
+// nodeRoleLabelKey is applied to each "node" docker container for categorization
+// of nodes by role
+const nodeRoleLabelKey = "io.x-k8s.kind.role"
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/images.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/images.go
new file mode 100644
index 000000000..8ad37d073
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/images.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package docker + +import ( + "fmt" + "strings" + "time" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" +) + +// ensureNodeImages ensures that the node images used by the create +// configuration are present +func ensureNodeImages(logger log.Logger, status *cli.Status, cfg *config.Cluster) error { + // pull each required image + for _, image := range common.RequiredNodeImages(cfg).List() { + // prints user friendly message + friendlyImageName, image := sanitizeImage(image) + status.Start(fmt.Sprintf("Ensuring node image (%s) 🖼", friendlyImageName)) + if _, err := pullIfNotPresent(logger, image, 4); err != nil { + status.End(false) + return err + } + } + return nil +} + +// pullIfNotPresent will pull an image if it is not present locally +// retrying up to retries times +// it returns true if it attempted to pull, and any errors from pulling +func pullIfNotPresent(logger log.Logger, image string, retries int) (pulled bool, err error) { + // TODO(bentheelder): switch most (all) of the logging here to debug level + // once we have configurable log levels + // if this did not return an error, then the image exists locally + cmd := exec.Command("docker", "inspect", "--type=image", image) + if err := cmd.Run(); err == nil { + logger.V(1).Infof("Image: %s present locally", image) + return false, nil + } + // otherwise try to pull it + return true, pull(logger, image, retries) +} + +// pull pulls an image, retrying up to retries times +func pull(logger log.Logger, image string, retries int) error { + logger.V(1).Infof("Pulling image: %s ...", image) + err := exec.Command("docker", "pull", image).Run() + // retry pulling up to retries times if necessary + if err != nil { + for i := 0; i < retries; i++ { + time.Sleep(time.Second * time.Duration(i+1)) + logger.V(1).Infof("Trying 
again to pull image: %q ... %v", image, err) + // TODO(bentheelder): add some backoff / sleep? + err = exec.Command("docker", "pull", image).Run() + if err == nil { + break + } + } + } + return errors.Wrapf(err, "failed to pull image %q", image) +} + +// sanitizeImage is a helper to return human readable image name and +// the docker pullable image name from the provided image +func sanitizeImage(image string) (string, string) { + if strings.Contains(image, "@sha256:") { + return strings.Split(image, "@sha256:")[0], image + } + return image, image +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/network.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/network.go new file mode 100644 index 000000000..f43284610 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/network.go @@ -0,0 +1,329 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package docker + +import ( + "bytes" + "crypto/sha1" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "net" + "regexp" + "sort" + "strconv" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// This may be overridden by KIND_EXPERIMENTAL_DOCKER_NETWORK env, +// experimentally... 
+// +// By default currently picking a single network is equivalent to the previous +// behavior *except* that we moved from the default bridge to a user defined +// network because the default bridge is actually special versus any other +// docker network and lacks the embedded DNS +// +// For now this also makes it easier for apps to join the same network, and +// leaves users with complex networking desires to create and manage their own +// networks. +const fixedNetworkName = "kind" + +// ensureNetwork checks if docker network by name exists, if not it creates it +func ensureNetwork(name string) error { + // check if network exists already and remove any duplicate networks + exists, err := removeDuplicateNetworks(name) + if err != nil { + return err + } + + // network already exists, we're good + // TODO: the network might already exist and not have ipv6 ... :| + // discussion: https://github.com/kubernetes-sigs/kind/pull/1508#discussion_r414594198 + if exists { + return nil + } + + // Generate unique subnet per network based on the name + // obtained from the ULA fc00::/8 range + // Use the MTU configured for the docker default network + // Make N attempts with "probing" in case we happen to collide + subnet := generateULASubnetFromName(name, 0) + mtu := getDefaultNetworkMTU() + err = createNetworkNoDuplicates(name, subnet, mtu) + if err == nil { + // Success! 
+ return nil + } + + // On the first try check if ipv6 fails entirely on this machine + // https://github.com/kubernetes-sigs/kind/issues/1544 + // Otherwise if it's not a pool overlap error, fail + // If it is, make more attempts below + if isIPv6UnavailableError(err) { + // only one attempt, IPAM is automatic in ipv4 only + return createNetworkNoDuplicates(name, "", mtu) + } + if isPoolOverlapError(err) { + // pool overlap suggests perhaps another process created the network + // check if network exists already and remove any duplicate networks + exists, err := checkIfNetworkExists(name) + if err != nil { + return err + } + if exists { + return nil + } + // otherwise we'll start trying with different subnets + } else { + // unknown error ... + return err + } + + // keep trying for ipv6 subnets + const maxAttempts = 5 + for attempt := int32(1); attempt < maxAttempts; attempt++ { + subnet := generateULASubnetFromName(name, attempt) + err = createNetworkNoDuplicates(name, subnet, mtu) + if err == nil { + // success! + return nil + } + if isPoolOverlapError(err) { + // pool overlap suggests perhaps another process created the network + // check if network exists already and remove any duplicate networks + exists, err := checkIfNetworkExists(name) + if err != nil { + return err + } + if exists { + return nil + } + // otherwise we'll try again + continue + } + // unknown error ... 
+ return err + } + return errors.New("exhausted attempts trying to find a non-overlapping subnet") +} + +func createNetworkNoDuplicates(name, ipv6Subnet string, mtu int) error { + if err := createNetwork(name, ipv6Subnet, mtu); err != nil && !isNetworkAlreadyExistsError(err) { + return err + } + _, err := removeDuplicateNetworks(name) + return err +} + +func removeDuplicateNetworks(name string) (bool, error) { + networks, err := sortedNetworksWithName(name) + if err != nil { + return false, err + } + if len(networks) > 1 { + if err := deleteNetworks(networks[1:]...); err != nil && !isOnlyErrorNoSuchNetwork(err) { + return false, err + } + } + return len(networks) > 0, nil +} + +func createNetwork(name, ipv6Subnet string, mtu int) error { + args := []string{"network", "create", "-d=bridge", + "-o", "com.docker.network.bridge.enable_ip_masquerade=true", + } + if mtu > 0 { + args = append(args, "-o", fmt.Sprintf("com.docker.network.driver.mtu=%d", mtu)) + } + if ipv6Subnet != "" { + args = append(args, "--ipv6", "--subnet", ipv6Subnet) + } + args = append(args, name) + return exec.Command("docker", args...).Run() +} + +// getDefaultNetworkMTU obtains the MTU from the docker default network +func getDefaultNetworkMTU() int { + cmd := exec.Command("docker", "network", "inspect", "bridge", + "-f", `{{ index .Options "com.docker.network.driver.mtu" }}`) + lines, err := exec.OutputLines(cmd) + if err != nil || len(lines) != 1 { + return 0 + } + mtu, err := strconv.Atoi(lines[0]) + if err != nil { + return 0 + } + return mtu +} + +func sortedNetworksWithName(name string) ([]string, error) { + // query which networks exist with the name + ids, err := networksWithName(name) + if err != nil { + return nil, err + } + // we can skip sorting if there are less than 2 + if len(ids) < 2 { + return ids, nil + } + // inspect them to get more detail for sorting + networks, err := inspectNetworks(ids) + if err != nil { + return nil, err + } + // deterministically sort networks + // 
NOTE: THIS PART IS IMPORTANT! + sortNetworkInspectEntries(networks) + // return network IDs + sortedIDs := make([]string, 0, len(networks)) + for i := range networks { + sortedIDs = append(sortedIDs, networks[i].ID) + } + return sortedIDs, nil +} + +func sortNetworkInspectEntries(networks []networkInspectEntry) { + sort.Slice(networks, func(i, j int) bool { + // we want networks with active containers first + if len(networks[i].Containers) > len(networks[j].Containers) { + return true + } + return networks[i].ID < networks[j].ID + }) +} + +func inspectNetworks(networkIDs []string) ([]networkInspectEntry, error) { + inspectOut, err := exec.Output(exec.Command("docker", append([]string{"network", "inspect"}, networkIDs...)...)) + // NOTE: the caller can detect if the network isn't present in the output anyhow + // we don't want to fail on this here. + if err != nil && !isOnlyErrorNoSuchNetwork(err) { + return nil, err + } + // parse + networks := []networkInspectEntry{} + if err := json.Unmarshal(inspectOut, &networks); err != nil { + return nil, errors.Wrap(err, "failed to decode networks list") + } + return networks, nil +} + +type networkInspectEntry struct { + ID string `json:"Id"` + // NOTE: we don't care about the contents here but we need to parse + // how many entries exist in the containers map + Containers map[string]map[string]string `json:"Containers"` +} + +// networksWithName returns a list of network IDs for networks with this name +func networksWithName(name string) ([]string, error) { + lsOut, err := exec.Output(exec.Command( + "docker", "network", "ls", + "--filter=name=^"+regexp.QuoteMeta(name)+"$", + "--format={{.ID}}", // output as unambiguous IDs + )) + if err != nil { + return nil, err + } + cleaned := strings.TrimSuffix(string(lsOut), "\n") + if cleaned == "" { // avoid returning []string{""} + return nil, nil + } + return strings.Split(cleaned, "\n"), nil +} + +func checkIfNetworkExists(name string) (bool, error) { + out, err := 
exec.Output(exec.Command( + "docker", "network", "ls", + "--filter=name=^"+regexp.QuoteMeta(name)+"$", + "--format={{.Name}}", + )) + return strings.HasPrefix(string(out), name), err +} + +func isIPv6UnavailableError(err error) bool { + rerr := exec.RunErrorForError(err) + return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: Cannot read IPv6 setup for bridge") +} + +func isPoolOverlapError(err error) bool { + rerr := exec.RunErrorForError(err) + return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: Pool overlaps with other one on this address space") || strings.Contains(string(rerr.Output), "networks have overlapping") +} + +func isNetworkAlreadyExistsError(err error) bool { + rerr := exec.RunErrorForError(err) + return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: network with name") && strings.Contains(string(rerr.Output), "already exists") +} + +// returns true if: +// - err only contains no such network errors +func isOnlyErrorNoSuchNetwork(err error) bool { + rerr := exec.RunErrorForError(err) + if rerr == nil { + return false + } + // check all lines of output from errored command + b := bytes.NewBuffer(rerr.Output) + for { + l, err := b.ReadBytes('\n') + if err == io.EOF { + break + } else if err != nil { + return false + } + // if the line begins with Error: No such network: it's fine + s := string(l) + if strings.HasPrefix(s, "Error: No such network:") { + continue + } + // other errors are not fine + if strings.HasPrefix(s, "Error: ") { + return false + } + // other line contents should just be network references + } + return true +} + +func deleteNetworks(networks ...string) error { + return exec.Command("docker", append([]string{"network", "rm"}, networks...)...).Run() +} + +// generateULASubnetFromName generate an IPv6 subnet based on the +// name and Nth probing attempt +func generateULASubnetFromName(name string, attempt int32) string { + ip 
:= make([]byte, 16)
+	ip[0] = 0xfc
+	ip[1] = 0x00
+	h := sha1.New()
+	_, _ = h.Write([]byte(name))
+	_ = binary.Write(h, binary.LittleEndian, attempt)
+	bs := h.Sum(nil)
+	for i := 2; i < 8; i++ {
+		ip[i] = bs[i]
+	}
+	subnet := &net.IPNet{
+		IP:   net.IP(ip),
+		Mask: net.CIDRMask(64, 128),
+	}
+	return subnet.String()
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/node.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/node.go
new file mode 100644
index 000000000..9bad7ed01
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/node.go
@@ -0,0 +1,171 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package docker + +import ( + "context" + "fmt" + "io" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// nodes.Node implementation for the docker provider +type node struct { + name string +} + +func (n *node) String() string { + return n.name +} + +func (n *node) Role() (string, error) { + cmd := exec.Command("docker", "inspect", + "--format", fmt.Sprintf(`{{ index .Config.Labels "%s"}}`, nodeRoleLabelKey), + n.name, + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get role for node") + } + if len(lines) != 1 { + return "", errors.Errorf("failed to get role for node: output lines %d != 1", len(lines)) + } + return lines[0], nil +} + +func (n *node) IP() (ipv4 string, ipv6 string, err error) { + // retrieve the IP address of the node using docker inspect + cmd := exec.Command("docker", "inspect", + "-f", "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}", + n.name, // ... 
against the "node" container + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", "", errors.Wrap(err, "failed to get container details") + } + if len(lines) != 1 { + return "", "", errors.Errorf("file should only be one line, got %d lines", len(lines)) + } + ips := strings.Split(lines[0], ",") + if len(ips) != 2 { + return "", "", errors.Errorf("container addresses should have 2 values, got %d values", len(ips)) + } + return ips[0], ips[1], nil +} + +func (n *node) Command(command string, args ...string) exec.Cmd { + return &nodeCmd{ + nameOrID: n.name, + command: command, + args: args, + } +} + +func (n *node) CommandContext(ctx context.Context, command string, args ...string) exec.Cmd { + return &nodeCmd{ + nameOrID: n.name, + command: command, + args: args, + ctx: ctx, + } +} + +// nodeCmd implements exec.Cmd for docker nodes +type nodeCmd struct { + nameOrID string // the container name or ID + command string + args []string + env []string + stdin io.Reader + stdout io.Writer + stderr io.Writer + ctx context.Context +} + +func (c *nodeCmd) Run() error { + args := []string{ + "exec", + // run with privileges so we can remount etc.. + // this might not make sense in the most general sense, but it is + // important to many kind commands + "--privileged", + } + if c.stdin != nil { + args = append(args, + "-i", // interactive so we can supply input + ) + } + // set env + for _, env := range c.env { + args = append(args, "-e", env) + } + // specify the container and command, after this everything will be + // args the command in the container rather than to docker + args = append( + args, + c.nameOrID, // ... against the container + c.command, // with the command specified + ) + args = append( + args, + // finally, with the caller args + c.args..., + ) + var cmd exec.Cmd + if c.ctx != nil { + cmd = exec.CommandContext(c.ctx, "docker", args...) + } else { + cmd = exec.Command("docker", args...) 
+	}
+	if c.stdin != nil {
+		cmd.SetStdin(c.stdin)
+	}
+	if c.stderr != nil {
+		cmd.SetStderr(c.stderr)
+	}
+	if c.stdout != nil {
+		cmd.SetStdout(c.stdout)
+	}
+	return cmd.Run()
+}
+
+func (c *nodeCmd) SetEnv(env ...string) exec.Cmd {
+	c.env = env
+	return c
+}
+
+func (c *nodeCmd) SetStdin(r io.Reader) exec.Cmd {
+	c.stdin = r
+	return c
+}
+
+func (c *nodeCmd) SetStdout(w io.Writer) exec.Cmd {
+	c.stdout = w
+	return c
+}
+
+func (c *nodeCmd) SetStderr(w io.Writer) exec.Cmd {
+	c.stderr = w
+	return c
+}
+
+func (n *node) SerialLogs(w io.Writer) error {
+	return exec.Command("docker", "logs", n.name).SetStdout(w).SetStderr(w).Run()
+}
diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provider.go
new file mode 100644
index 000000000..b1786fbc9
--- /dev/null
+++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provider.go
@@ -0,0 +1,344 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package docker + +import ( + "encoding/csv" + "encoding/json" + "fmt" + "net" + "os" + "path/filepath" + "strings" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/log" + + internallogs "sigs.k8s.io/kind/pkg/cluster/internal/logs" + "sigs.k8s.io/kind/pkg/cluster/internal/providers" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" + "sigs.k8s.io/kind/pkg/internal/sets" +) + +// NewProvider returns a new provider based on executing `docker ...` +func NewProvider(logger log.Logger) providers.Provider { + return &provider{ + logger: logger, + } +} + +// Provider implements provider.Provider +// see NewProvider +type provider struct { + logger log.Logger + info *providers.ProviderInfo +} + +// String implements fmt.Stringer +// NOTE: the value of this should not currently be relied upon for anything! +// This is only used for setting the Node's providerID +func (p *provider) String() string { + return "docker" +} + +// Provision is part of the providers.Provider interface +func (p *provider) Provision(status *cli.Status, cfg *config.Cluster) (err error) { + // TODO: validate cfg + // ensure node images are pulled before actually provisioning + if err := ensureNodeImages(p.logger, status, cfg); err != nil { + return err + } + + // ensure the pre-requisite network exists + networkName := fixedNetworkName + if n := os.Getenv("KIND_EXPERIMENTAL_DOCKER_NETWORK"); n != "" { + p.logger.Warn("WARNING: Overriding docker network due to KIND_EXPERIMENTAL_DOCKER_NETWORK") + p.logger.Warn("WARNING: Here be dragons! 
This is not supported currently.") + networkName = n + } + if err := ensureNetwork(networkName); err != nil { + return errors.Wrap(err, "failed to ensure docker network") + } + + // actually provision the cluster + icons := strings.Repeat("📦 ", len(cfg.Nodes)) + status.Start(fmt.Sprintf("Preparing nodes %s", icons)) + defer func() { status.End(err == nil) }() + + // plan creating the containers + createContainerFuncs, err := planCreation(cfg, networkName) + if err != nil { + return err + } + + // actually create nodes + return errors.UntilErrorConcurrent(createContainerFuncs) +} + +// ListClusters is part of the providers.Provider interface +func (p *provider) ListClusters() ([]string, error) { + cmd := exec.Command("docker", + "ps", + "-a", // show stopped nodes + // filter for nodes with the cluster label + "--filter", "label="+clusterLabelKey, + // format to include the cluster name + "--format", fmt.Sprintf(`{{.Label "%s"}}`, clusterLabelKey), + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to list clusters") + } + return sets.NewString(lines...).List(), nil +} + +// ListNodes is part of the providers.Provider interface +func (p *provider) ListNodes(cluster string) ([]nodes.Node, error) { + cmd := exec.Command("docker", + "ps", + "-a", // show stopped nodes + // filter for nodes with the cluster label + "--filter", fmt.Sprintf("label=%s=%s", clusterLabelKey, cluster), + // format to include the cluster name + "--format", `{{.Names}}`, + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to list nodes") + } + // convert names to node handles + ret := make([]nodes.Node, 0, len(lines)) + for _, name := range lines { + ret = append(ret, p.node(name)) + } + return ret, nil +} + +// DeleteNodes is part of the providers.Provider interface +func (p *provider) DeleteNodes(n []nodes.Node) error { + if len(n) == 0 { + return nil + } + const command = "docker" + args := 
make([]string, 0, len(n)+3) // allocate once + args = append(args, + "rm", + "-f", // force the container to be delete now + "-v", // delete volumes + ) + for _, node := range n { + args = append(args, node.String()) + } + if err := exec.Command(command, args...).Run(); err != nil { + return errors.Wrap(err, "failed to delete nodes") + } + return nil +} + +// GetAPIServerEndpoint is part of the providers.Provider interface +func (p *provider) GetAPIServerEndpoint(cluster string) (string, error) { + // locate the node that hosts this + allNodes, err := p.ListNodes(cluster) + if err != nil { + return "", errors.Wrap(err, "failed to list nodes") + } + n, err := nodeutils.APIServerEndpointNode(allNodes) + if err != nil { + return "", errors.Wrap(err, "failed to get api server endpoint") + } + + // if the 'desktop.docker.io/ports//tcp' label is present, + // defer to its value for the api server endpoint + // + // For example: + // "Labels": { + // "desktop.docker.io/ports/6443/tcp": "10.0.1.7:6443", + // } + cmd := exec.Command( + "docker", "inspect", + "--format", fmt.Sprintf( + "{{ index .Config.Labels \"desktop.docker.io/ports/%d/tcp\" }}", common.APIServerInternalPort, + ), + n.String(), + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get api server port") + } + if len(lines) == 1 && lines[0] != "" { + return lines[0], nil + } + + // else, retrieve the specific port mapping via NetworkSettings.Ports + cmd = exec.Command( + "docker", "inspect", + "--format", fmt.Sprintf( + "{{ with (index (index .NetworkSettings.Ports \"%d/tcp\") 0) }}{{ printf \"%%s\t%%s\" .HostIp .HostPort }}{{ end }}", common.APIServerInternalPort, + ), + n.String(), + ) + lines, err = exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get api server port") + } + if len(lines) != 1 { + return "", errors.Errorf("network details should only be one line, got %d lines", len(lines)) + } + parts := 
strings.Split(lines[0], "\t") + if len(parts) != 2 { + return "", errors.Errorf("network details should only be two parts, got %d", len(parts)) + } + + // join host and port + return net.JoinHostPort(parts[0], parts[1]), nil +} + +// GetAPIServerInternalEndpoint is part of the providers.Provider interface +func (p *provider) GetAPIServerInternalEndpoint(cluster string) (string, error) { + // locate the node that hosts this + allNodes, err := p.ListNodes(cluster) + if err != nil { + return "", errors.Wrap(err, "failed to list nodes") + } + n, err := nodeutils.APIServerEndpointNode(allNodes) + if err != nil { + return "", errors.Wrap(err, "failed to get api server endpoint") + } + // NOTE: we're using the nodes's hostnames which are their names + return net.JoinHostPort(n.String(), fmt.Sprintf("%d", common.APIServerInternalPort)), nil +} + +// node returns a new node handle for this provider +func (p *provider) node(name string) nodes.Node { + return &node{ + name: name, + } +} + +// CollectLogs will populate dir with cluster logs and other debug files +func (p *provider) CollectLogs(dir string, nodes []nodes.Node) error { + execToPathFn := func(cmd exec.Cmd, path string) func() error { + return func() error { + f, err := common.FileOnHost(path) + if err != nil { + return err + } + defer f.Close() + return cmd.SetStdout(f).SetStderr(f).Run() + } + } + // construct a slice of methods to collect logs + fns := []func() error{ + // record info about the host docker + execToPathFn( + exec.Command("docker", "info"), + filepath.Join(dir, "docker-info.txt"), + ), + } + + // collect /var/log for each node and plan collecting more logs + var errs []error + for _, n := range nodes { + node := n // https://golang.org/doc/faq#closures_and_goroutines + name := node.String() + path := filepath.Join(dir, name) + if err := internallogs.DumpDir(p.logger, node, "/var/log", path); err != nil { + errs = append(errs, err) + } + + fns = append(fns, + func() error { return 
common.CollectLogs(node, path) }, + execToPathFn(exec.Command("docker", "inspect", name), filepath.Join(path, "inspect.json")), + func() error { + f, err := common.FileOnHost(filepath.Join(path, "serial.log")) + if err != nil { + return err + } + defer f.Close() + return node.SerialLogs(f) + }, + ) + } + + // run and collect up all errors + errs = append(errs, errors.AggregateConcurrent(fns)) + return errors.NewAggregate(errs) +} + +// Info returns the provider info. +// The info is cached on the first time of the execution. +func (p *provider) Info() (*providers.ProviderInfo, error) { + var err error + if p.info == nil { + p.info, err = info() + } + return p.info, err +} + +// dockerInfo corresponds to `docker info --format '{{json .}}'` +type dockerInfo struct { + CgroupDriver string `json:"CgroupDriver"` // "systemd", "cgroupfs", "none" + CgroupVersion string `json:"CgroupVersion"` // e.g. "2" + MemoryLimit bool `json:"MemoryLimit"` + PidsLimit bool `json:"PidsLimit"` + CPUShares bool `json:"CPUShares"` + SecurityOptions []string `json:"SecurityOptions"` +} + +func info() (*providers.ProviderInfo, error) { + cmd := exec.Command("docker", "info", "--format", "{{json .}}") + out, err := exec.Output(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to get docker info") + } + var dInfo dockerInfo + if err := json.Unmarshal(out, &dInfo); err != nil { + return nil, err + } + info := providers.ProviderInfo{ + Cgroup2: dInfo.CgroupVersion == "2", + } + // When CgroupDriver == "none", the MemoryLimit/PidsLimit/CPUShares + // values are meaningless and need to be considered false. 
+ // https://github.com/moby/moby/issues/42151 + if dInfo.CgroupDriver != "none" { + info.SupportsMemoryLimit = dInfo.MemoryLimit + info.SupportsPidsLimit = dInfo.PidsLimit + info.SupportsCPUShares = dInfo.CPUShares + } + for _, o := range dInfo.SecurityOptions { + // o is like "name=seccomp,profile=default", or "name=rootless", + csvReader := csv.NewReader(strings.NewReader(o)) + sliceSlice, err := csvReader.ReadAll() + if err != nil { + return nil, err + } + for _, f := range sliceSlice { + for _, ff := range f { + if ff == "name=rootless" { + info.Rootless = true + } + } + } + } + return &info, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go new file mode 100644 index 000000000..6c644a365 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go @@ -0,0 +1,418 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package docker + +import ( + "context" + "fmt" + "net" + "path/filepath" + "strings" + "time" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/fs" + + "sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// planCreation creates a slice of funcs that will create the containers +func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs []func() error, err error) { + // we need to know all the names for NO_PROXY + // compute the names first before any actual node details + nodeNamer := common.MakeNodeNamer(cfg.Name) + names := make([]string, len(cfg.Nodes)) + for i, node := range cfg.Nodes { + name := nodeNamer(string(node.Role)) // name the node + names[i] = name + } + haveLoadbalancer := config.ClusterHasImplicitLoadBalancer(cfg) + if haveLoadbalancer { + names = append(names, nodeNamer(constants.ExternalLoadBalancerNodeRoleValue)) + } + + // these apply to all container creation + genericArgs, err := commonArgs(cfg.Name, cfg, networkName, names) + if err != nil { + return nil, err + } + + // only the external LB should reflect the port if we have multiple control planes + apiServerPort := cfg.Networking.APIServerPort + apiServerAddress := cfg.Networking.APIServerAddress + if haveLoadbalancer { + // TODO: picking ports locally is less than ideal with remote docker + // but this is supposed to be an implementation detail and NOT picking + // them breaks host reboot ... 
+ // For now remote docker + multi control plane is not supported + apiServerPort = 0 // replaced with random ports + apiServerAddress = "127.0.0.1" // only the LB needs to be non-local + // only for IPv6 only clusters + if cfg.Networking.IPFamily == config.IPv6Family { + apiServerAddress = "::1" // only the LB needs to be non-local + } + // plan loadbalancer node + name := names[len(names)-1] + createContainerFuncs = append(createContainerFuncs, func() error { + args, err := runArgsForLoadBalancer(cfg, name, genericArgs) + if err != nil { + return err + } + return createContainer(name, args) + }) + } + + // plan normal nodes + for i, node := range cfg.Nodes { + node := node.DeepCopy() // copy so we can modify + name := names[i] + + // fixup relative paths, docker can only handle absolute paths + for m := range node.ExtraMounts { + hostPath := node.ExtraMounts[m].HostPath + if !fs.IsAbs(hostPath) { + absHostPath, err := filepath.Abs(hostPath) + if err != nil { + return nil, errors.Wrapf(err, "unable to resolve absolute path for hostPath: %q", hostPath) + } + node.ExtraMounts[m].HostPath = absHostPath + } + } + + // plan actual creation based on role + switch node.Role { + case config.ControlPlaneRole: + createContainerFuncs = append(createContainerFuncs, func() error { + node.ExtraPortMappings = append(node.ExtraPortMappings, + config.PortMapping{ + ListenAddress: apiServerAddress, + HostPort: apiServerPort, + ContainerPort: common.APIServerInternalPort, + }, + ) + args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs) + if err != nil { + return err + } + return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args) + }) + case config.WorkerRole: + createContainerFuncs = append(createContainerFuncs, func() error { + args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs) + if err != nil { + return err + } + return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args) + }) + default: + 
return nil, errors.Errorf("unknown node role: %q", node.Role) + } + } + return createContainerFuncs, nil +} + +// commonArgs computes static arguments that apply to all containers +func commonArgs(cluster string, cfg *config.Cluster, networkName string, nodeNames []string) ([]string, error) { + // standard arguments all nodes containers need, computed once + args := []string{ + "--detach", // run the container detached + "--tty", // allocate a tty for entrypoint logs + // label the node with the cluster ID + "--label", fmt.Sprintf("%s=%s", clusterLabelKey, cluster), + // user a user defined docker network so we get embedded DNS + "--net", networkName, + // Docker supports the following restart modes: + // - no + // - on-failure[:max-retries] + // - unless-stopped + // - always + // https://docs.docker.com/engine/reference/commandline/run/#restart-policies---restart + // + // What we desire is: + // - restart on host / dockerd reboot + // - don't restart for any other reason + // + // This means: + // - no is out of the question ... it never restarts + // - always is a poor choice, we'll keep trying to restart nodes that were + // never going to work + // - unless-stopped will also retry failures indefinitely, similar to always + // except that it won't restart when the container is `docker stop`ed + // - on-failure is not great, we're only interested in restarting on + // reboots, not failures. *however* we can limit the number of retries + // *and* it forgets all state on dockerd restart and retries anyhow. + // - on-failure:0 is what we want .. restart on failures, except max + // retries is 0, so only restart on reboots. 
+ // however this _actually_ means the same thing as always + // so the closest thing is on-failure:1, which will retry *once* + "--restart=on-failure:1", + // this can be enabled by default in docker daemon.json, so we explicitly + // disable it, we want our entrypoint to be PID1, not docker-init / tini + "--init=false", + // note: requires API v1.41+ from Dec 2020 in Docker 20.10.0 + // this is the default with cgroups v2 but not with cgroups v1, unless + // overridden in the daemon --default-cgroupns-mode + // https://github.com/docker/cli/pull/3699#issuecomment-1191675788 + "--cgroupns=private", + } + + // enable IPv6 if necessary + if config.ClusterHasIPv6(cfg) { + args = append(args, "--sysctl=net.ipv6.conf.all.disable_ipv6=0", "--sysctl=net.ipv6.conf.all.forwarding=1") + } + + // pass proxy environment variables + proxyEnv, err := getProxyEnv(cfg, networkName, nodeNames) + if err != nil { + return nil, errors.Wrap(err, "proxy setup error") + } + for key, val := range proxyEnv { + args = append(args, "-e", fmt.Sprintf("%s=%s", key, val)) + } + + // handle hosts that have user namespace remapping enabled + if usernsRemap() { + args = append(args, "--userns=host") + } + + // handle Docker on Btrfs or ZFS + // https://github.com/kubernetes-sigs/kind/issues/1416#issuecomment-606514724 + if mountDevMapper() { + args = append(args, "--volume", "/dev/mapper:/dev/mapper") + } + + // enable /dev/fuse explicitly for fuse-overlayfs + // (Rootless Docker does not automatically mount /dev/fuse with --privileged) + if mountFuse() { + args = append(args, "--device", "/dev/fuse") + } + + if cfg.Networking.DNSSearch != nil { + args = append(args, "-e", "KIND_DNS_SEARCH="+strings.Join(*cfg.Networking.DNSSearch, " ")) + } + + return args, nil +} + +func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, name string, args []string) ([]string, error) { + args = append([]string{ + "--hostname", name, // make hostname match container name + // label the node 
with the role ID + "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, node.Role), + // running containers in a container requires privileged + // NOTE: we could try to replicate this with --cap-add, and use less + // privileges, but this flag also changes some mounts that are necessary + // including some ones docker would otherwise do by default. + // for now this is what we want. in the future we may revisit this. + "--privileged", + "--security-opt", "seccomp=unconfined", // also ignore seccomp + "--security-opt", "apparmor=unconfined", // also ignore apparmor + // runtime temporary storage + "--tmpfs", "/tmp", // various things depend on working /tmp + "--tmpfs", "/run", // systemd wants a writable /run + // runtime persistent storage + // this ensures that E.G. pods, logs etc. are not on the container + // filesystem, which is not only better for performance, but allows + // running kind in kind for "party tricks" + // (please don't depend on doing this though!) + "--volume", "/var", + // some k8s things want to read /lib/modules + "--volume", "/lib/modules:/lib/modules:ro", + // propagate KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER to the entrypoint script + "-e", "KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER", + }, + args..., + ) + + // convert mounts and port mappings to container run args + args = append(args, generateMountBindings(node.ExtraMounts...)...) + mappingArgs, err := generatePortMappings(clusterIPFamily, node.ExtraPortMappings...) + if err != nil { + return nil, err + } + args = append(args, mappingArgs...) 
+ + switch node.Role { + case config.ControlPlaneRole: + args = append(args, "-e", "KUBECONFIG=/etc/kubernetes/admin.conf") + } + + // finally, specify the image to run + return append(args, node.Image), nil +} + +func runArgsForLoadBalancer(cfg *config.Cluster, name string, args []string) ([]string, error) { + args = append([]string{ + "--hostname", name, // make hostname match container name + // label the node with the role ID + "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, constants.ExternalLoadBalancerNodeRoleValue), + }, + args..., + ) + + // load balancer port mapping + mappingArgs, err := generatePortMappings(cfg.Networking.IPFamily, + config.PortMapping{ + ListenAddress: cfg.Networking.APIServerAddress, + HostPort: cfg.Networking.APIServerPort, + ContainerPort: common.APIServerInternalPort, + }, + ) + if err != nil { + return nil, err + } + args = append(args, mappingArgs...) + + // finally, specify the image to run + return append(args, loadbalancer.Image), nil +} + +func getProxyEnv(cfg *config.Cluster, networkName string, nodeNames []string) (map[string]string, error) { + envs := common.GetProxyEnvs(cfg) + // Specifically add the docker network subnets to NO_PROXY if we are using a proxy + if len(envs) > 0 { + subnets, err := getSubnets(networkName) + if err != nil { + return nil, err + } + + noProxyList := append(subnets, envs[common.NOProxy]) + noProxyList = append(noProxyList, nodeNames...) + // Add pod and service dns names to no_proxy to allow in cluster + // Note: this is best effort based on the default CoreDNS spec + // https://github.com/kubernetes/dns/blob/master/docs/specification.md + // Any user created pod/service hostnames, namespaces, custom DNS services + // are expected to be no-proxied by the user explicitly. 
+ noProxyList = append(noProxyList, ".svc", ".svc.cluster", ".svc.cluster.local") + noProxyJoined := strings.Join(noProxyList, ",") + envs[common.NOProxy] = noProxyJoined + envs[strings.ToLower(common.NOProxy)] = noProxyJoined + } + return envs, nil +} + +func getSubnets(networkName string) ([]string, error) { + format := `{{range (index (index . "IPAM") "Config")}}{{index . "Subnet"}} {{end}}` + cmd := exec.Command("docker", "network", "inspect", "-f", format, networkName) + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to get subnets") + } + return strings.Split(strings.TrimSpace(lines[0]), " "), nil +} + +// generateMountBindings converts the mount list to a list of args for docker +// ':[:options]', where 'options' +// is a comma-separated list of the following strings: +// 'ro', if the path is read only +// 'Z', if the volume requires SELinux relabeling +func generateMountBindings(mounts ...config.Mount) []string { + args := make([]string, 0, len(mounts)) + for _, m := range mounts { + bind := fmt.Sprintf("%s:%s", m.HostPath, m.ContainerPath) + var attrs []string + if m.Readonly { + attrs = append(attrs, "ro") + } + // Only request relabeling if the pod provides an SELinux context. If the pod + // does not provide an SELinux context relabeling will label the volume with + // the container's randomly allocated MCS label. This would restrict access + // to the volume to the container which mounts it first. 
+ if m.SelinuxRelabel { + attrs = append(attrs, "Z") + } + switch m.Propagation { + case config.MountPropagationNone: + // noop, private is default + case config.MountPropagationBidirectional: + attrs = append(attrs, "rshared") + case config.MountPropagationHostToContainer: + attrs = append(attrs, "rslave") + default: // Falls back to "private" + } + if len(attrs) > 0 { + bind = fmt.Sprintf("%s:%s", bind, strings.Join(attrs, ",")) + } + args = append(args, fmt.Sprintf("--volume=%s", bind)) + } + return args +} + +// generatePortMappings converts the portMappings list to a list of args for docker +func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings ...config.PortMapping) ([]string, error) { + args := make([]string, 0, len(portMappings)) + for _, pm := range portMappings { + // do provider internal defaulting + // in a future API revision we will handle this at the API level and remove this + if pm.ListenAddress == "" { + switch clusterIPFamily { + case config.IPv4Family, config.DualStackFamily: + pm.ListenAddress = "0.0.0.0" // this is the docker default anyhow + case config.IPv6Family: + pm.ListenAddress = "::" + default: + return nil, errors.Errorf("unknown cluster IP family: %v", clusterIPFamily) + } + } + if string(pm.Protocol) == "" { + pm.Protocol = config.PortMappingProtocolTCP // TCP is the default + } + + // validate that the provider can handle this binding + switch pm.Protocol { + case config.PortMappingProtocolTCP: + case config.PortMappingProtocolUDP: + case config.PortMappingProtocolSCTP: + default: + return nil, errors.Errorf("unknown port mapping protocol: %v", pm.Protocol) + } + + // get a random port if necessary (port = 0) + hostPort, err := common.PortOrGetFreePort(pm.HostPort, pm.ListenAddress) + if err != nil { + return nil, errors.Wrap(err, "failed to get random host port for port mapping") + } + + // generate the actual mapping arg + protocol := string(pm.Protocol) + hostPortBinding := 
net.JoinHostPort(pm.ListenAddress, fmt.Sprintf("%d", hostPort)) + args = append(args, fmt.Sprintf("--publish=%s:%d/%s", hostPortBinding, pm.ContainerPort, protocol)) + } + return args, nil +} + +func createContainer(name string, args []string) error { + if err := exec.Command("docker", append([]string{"run", "--name", name}, args...)...).Run(); err != nil { + return err + } + return nil +} + +func createContainerWithWaitUntilSystemdReachesMultiUserSystem(name string, args []string) error { + if err := exec.Command("docker", append([]string{"run", "--name", name}, args...)...).Run(); err != nil { + return err + } + + logCtx, logCancel := context.WithTimeout(context.Background(), 30*time.Second) + logCmd := exec.CommandContext(logCtx, "docker", "logs", "-f", name) + defer logCancel() + return common.WaitUntilLogRegexpMatches(logCtx, logCmd, common.NodeReachedCgroupsReadyRegexp()) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/util.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/util.go new file mode 100644 index 000000000..2ec86d73f --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/util.go @@ -0,0 +1,100 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package docker + +import ( + "encoding/json" + "strings" + + "sigs.k8s.io/kind/pkg/exec" +) + +// IsAvailable checks if docker is available in the system +func IsAvailable() bool { + cmd := exec.Command("docker", "-v") + lines, err := exec.OutputLines(cmd) + if err != nil || len(lines) != 1 { + return false + } + return strings.HasPrefix(lines[0], "Docker version") +} + +// usernsRemap checks if userns-remap is enabled in dockerd +func usernsRemap() bool { + cmd := exec.Command("docker", "info", "--format", "'{{json .SecurityOptions}}'") + lines, err := exec.OutputLines(cmd) + if err != nil { + return false + } + if len(lines) > 0 { + if strings.Contains(lines[0], "name=userns") { + return true + } + } + return false +} + +// mountDevMapper checks if the Docker storage driver is Btrfs or ZFS +// or if the backing filesystem is Btrfs +func mountDevMapper() bool { + storage := "" + // check the docker storage driver + cmd := exec.Command("docker", "info", "-f", "{{.Driver}}") + lines, err := exec.OutputLines(cmd) + if err != nil || len(lines) != 1 { + return false + } + + storage = strings.ToLower(strings.TrimSpace(lines[0])) + if storage == "btrfs" || storage == "zfs" || storage == "devicemapper" { + return true + } + + // check the backing file system + // docker info -f '{{json .DriverStatus }}' + // [["Backing Filesystem","extfs"],["Supports d_type","true"],["Native Overlay Diff","true"]] + cmd = exec.Command("docker", "info", "-f", "{{json .DriverStatus }}") + lines, err = exec.OutputLines(cmd) + if err != nil || len(lines) != 1 { + return false + } + var dat [][]string + if err := json.Unmarshal([]byte(lines[0]), &dat); err != nil { + return false + } + for _, item := range dat { + if item[0] == "Backing Filesystem" { + storage = strings.ToLower(item[1]) + break + } + } + + return storage == "btrfs" || storage == "zfs" || storage == "xfs" +} + +// rootless: use fuse-overlayfs by default +// https://github.com/kubernetes-sigs/kind/issues/2275 +func 
mountFuse() bool { + i, err := info() + if err != nil { + return false + } + if i != nil && i.Rootless { + return true + } + return false +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/OWNERS b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/OWNERS new file mode 100644 index 000000000..167d41b76 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/OWNERS @@ -0,0 +1,13 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - aojea + - BenTheElder +approvers: + - aojea + - BenTheElder +emeritus_approvers: + - amwat + +labels: + - area/provider/podman diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/constants.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/constants.go new file mode 100644 index 000000000..e30f167d2 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/constants.go @@ -0,0 +1,24 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podman + +// clusterLabelKey is applied to each "node" podman container for identification +const clusterLabelKey = "io.x-k8s.kind.cluster" + +// nodeRoleLabelKey is applied to each "node" podman container for categorization +// of nodes by role +const nodeRoleLabelKey = "io.x-k8s.kind.role" diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/images.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/images.go new file mode 100644 index 000000000..c44fead50 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/images.go @@ -0,0 +1,115 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podman + +import ( + "fmt" + "strings" + "time" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" +) + +// ensureNodeImages ensures that the node images used by the create +// configuration are present +func ensureNodeImages(logger log.Logger, status *cli.Status, cfg *config.Cluster) error { + // pull each required image + for _, image := range common.RequiredNodeImages(cfg).List() { + // prints user friendly message + friendlyImageName, image := sanitizeImage(image) + status.Start(fmt.Sprintf("Ensuring node image (%s) 🖼", friendlyImageName)) + if _, err := pullIfNotPresent(logger, image, 4); err != nil { + status.End(false) + return err + } + } + return nil +} + +// pullIfNotPresent will pull an image if it is not present locally +// retrying up to retries times +// it returns true if it attempted to pull, and any errors from pulling +func pullIfNotPresent(logger log.Logger, image string, retries int) (pulled bool, err error) { + // TODO(bentheelder): switch most (all) of the logging here to debug level + // once we have configurable log levels + // if this did not return an error, then the image exists locally + cmd := exec.Command("podman", "inspect", "--type=image", image) + if err := cmd.Run(); err == nil { + logger.V(1).Infof("Image: %s present locally", image) + return false, nil + } + // otherwise try to pull it + return true, pull(logger, image, retries) +} + +// pull pulls an image, retrying up to retries times +func pull(logger log.Logger, image string, retries int) error { + logger.V(1).Infof("Pulling image: %s ...", image) + err := exec.Command("podman", "pull", image).Run() + // retry pulling up to retries times if necessary + if err != nil { + for i := 0; i < retries; i++ { + time.Sleep(time.Second * time.Duration(i+1)) + logger.V(1).Infof("Trying 
again to pull image: %q ... %v", image, err) + // TODO(bentheelder): add some backoff / sleep? + err = exec.Command("podman", "pull", image).Run() + if err == nil { + break + } + } + } + return errors.Wrapf(err, "failed to pull image %q", image) +} + +// sanitizeImage is a helper to return human readable image name and +// the podman pullable image name from the provided image +func sanitizeImage(image string) (friendlyImageName, pullImageName string) { + const ( + defaultDomain = "docker.io/" + officialRepoName = "library" + ) + + var remainder string + + if strings.Contains(image, "@sha256:") { + splits := strings.Split(image, "@sha256:") + friendlyImageName = splits[0] + remainder = strings.Split(splits[0], ":")[0] + "@sha256:" + splits[1] + } else { + friendlyImageName = image + remainder = image + } + + if !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + + i := strings.IndexRune(friendlyImageName, '/') + if i == -1 || (!strings.ContainsAny(friendlyImageName[:i], ".:") && friendlyImageName[:i] != "localhost") { + pullImageName = defaultDomain + remainder + } else { + pullImageName = remainder + } + + return +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/network.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/network.go new file mode 100644 index 000000000..ef138bcac --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/network.go @@ -0,0 +1,146 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podman + +import ( + "crypto/sha1" + "encoding/binary" + "net" + "regexp" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// This may be overridden by KIND_EXPERIMENTAL_PODMAN_NETWORK env, +// experimentally... +// +// By default currently picking a single network is equivalent to the previous +// behavior *except* that we moved from the default bridge to a user defined +// network because the default bridge is actually special versus any other +// docker network and lacks the embedded DNS +// +// For now this also makes it easier for apps to join the same network, and +// leaves users with complex networking desires to create and manage their own +// networks. +const fixedNetworkName = "kind" + +// ensureNetwork creates a new network +// podman only creates IPv6 networks for versions >= 2.2.0 +func ensureNetwork(name string) error { + // network already exists + if checkIfNetworkExists(name) { + return nil + } + + // generate unique subnet per network based on the name + // obtained from the ULA fc00::/8 range + // Make N attempts with "probing" in case we happen to collide + subnet := generateULASubnetFromName(name, 0) + err := createNetwork(name, subnet) + if err == nil { + // Success! + return nil + } + + if isUnknownIPv6FlagError(err) || + isIPv6DisabledError(err) { + return createNetwork(name, "") + } + + // Only continue if the error is because of the subnet range + // is already allocated + if !isPoolOverlapError(err) { + return err + } + + // keep trying for ipv6 subnets + const maxAttempts = 5 + for attempt := int32(1); attempt < maxAttempts; attempt++ { + subnet := generateULASubnetFromName(name, attempt) + err = createNetwork(name, subnet) + if err == nil { + // success! + return nil + } else if !isPoolOverlapError(err) { + // unknown error ... 
+ return err + } + } + return errors.New("exhausted attempts trying to find a non-overlapping subnet") + +} + +func createNetwork(name, ipv6Subnet string) error { + if ipv6Subnet == "" { + return exec.Command("podman", "network", "create", "-d=bridge", name).Run() + } + return exec.Command("podman", "network", "create", "-d=bridge", + "--ipv6", "--subnet", ipv6Subnet, name).Run() +} + +func checkIfNetworkExists(name string) bool { + _, err := exec.Output(exec.Command( + "podman", "network", "inspect", + regexp.QuoteMeta(name), + )) + return err == nil +} + +func isUnknownIPv6FlagError(err error) bool { + rerr := exec.RunErrorForError(err) + return rerr != nil && + strings.Contains(string(rerr.Output), "unknown flag: --ipv6") +} + +func isIPv6DisabledError(err error) bool { + rerr := exec.RunErrorForError(err) + return rerr != nil && + strings.Contains(string(rerr.Output), "is ipv6 enabled in the kernel") +} + +func isPoolOverlapError(err error) bool { + rerr := exec.RunErrorForError(err) + if rerr == nil { + return false + } + output := string(rerr.Output) + return strings.Contains(output, "is already used on the host or by another config") || + strings.Contains(output, "is being used by a network interface") || + strings.Contains(output, "is already being used by a cni configuration") +} + +// generateULASubnetFromName generate an IPv6 subnet based on the +// name and Nth probing attempt +func generateULASubnetFromName(name string, attempt int32) string { + ip := make([]byte, 16) + ip[0] = 0xfc + ip[1] = 0x00 + h := sha1.New() + _, _ = h.Write([]byte(name)) + _ = binary.Write(h, binary.LittleEndian, attempt) + bs := h.Sum(nil) + for i := 2; i < 8; i++ { + ip[i] = bs[i] + } + subnet := &net.IPNet{ + IP: net.IP(ip), + Mask: net.CIDRMask(64, 128), + } + return subnet.String() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/node.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/node.go new file mode 100644 index 
000000000..5285dd224 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/node.go @@ -0,0 +1,171 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podman + +import ( + "context" + "fmt" + "io" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// nodes.Node implementation for the podman provider +type node struct { + name string +} + +func (n *node) String() string { + return n.name +} + +func (n *node) Role() (string, error) { + cmd := exec.Command("podman", "inspect", + "--format", fmt.Sprintf(`{{ index .Config.Labels "%s"}}`, nodeRoleLabelKey), + n.name, + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get role for node") + } + if len(lines) != 1 { + return "", errors.Errorf("failed to get role for node: output lines %d != 1", len(lines)) + } + return lines[0], nil + } + +func (n *node) IP() (ipv4 string, ipv6 string, err error) { + // retrieve the IP address of the node using podman inspect + cmd := exec.Command("podman", "inspect", + "-f", "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}", + n.name, // ... 
against the "node" container + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", "", errors.Wrap(err, "failed to get container details") + } + if len(lines) != 1 { + return "", "", errors.Errorf("file should only be one line, got %d lines", len(lines)) + } + ips := strings.Split(lines[0], ",") + if len(ips) != 2 { + return "", "", errors.Errorf("container addresses should have 2 values, got %d values", len(ips)) + } + return ips[0], ips[1], nil +} + +func (n *node) Command(command string, args ...string) exec.Cmd { + return &nodeCmd{ + nameOrID: n.name, + command: command, + args: args, + } +} + +func (n *node) CommandContext(ctx context.Context, command string, args ...string) exec.Cmd { + return &nodeCmd{ + nameOrID: n.name, + command: command, + args: args, + ctx: ctx, + } +} + +// nodeCmd implements exec.Cmd for podman nodes +type nodeCmd struct { + nameOrID string // the container name or ID + command string + args []string + env []string + stdin io.Reader + stdout io.Writer + stderr io.Writer + ctx context.Context +} + +func (c *nodeCmd) Run() error { + args := []string{ + "exec", + // run with privileges so we can remount etc.. + // this might not make sense in the most general sense, but it is + // important to many kind commands + "--privileged", + } + if c.stdin != nil { + args = append(args, + "-i", // interactive so we can supply input + ) + } + // set env + for _, env := range c.env { + args = append(args, "-e", env) + } + // specify the container and command, after this everything will be + // args the command in the container rather than to podman + args = append( + args, + c.nameOrID, // ... against the container + c.command, // with the command specified + ) + args = append( + args, + // finally, with the caller args + c.args..., + ) + var cmd exec.Cmd + if c.ctx != nil { + cmd = exec.CommandContext(c.ctx, "podman", args...) + } else { + cmd = exec.Command("podman", args...) 
+ } + if c.stdin != nil { + cmd.SetStdin(c.stdin) + } + if c.stderr != nil { + cmd.SetStderr(c.stderr) + } + if c.stdout != nil { + cmd.SetStdout(c.stdout) + } + return cmd.Run() +} + +func (c *nodeCmd) SetEnv(env ...string) exec.Cmd { + c.env = env + return c +} + +func (c *nodeCmd) SetStdin(r io.Reader) exec.Cmd { + c.stdin = r + return c +} + +func (c *nodeCmd) SetStdout(w io.Writer) exec.Cmd { + c.stdout = w + return c +} + +func (c *nodeCmd) SetStderr(w io.Writer) exec.Cmd { + c.stderr = w + return c +} + +func (n *node) SerialLogs(w io.Writer) error { + return exec.Command("podman", "logs", n.name).SetStdout(w).SetStderr(w).Run() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provider.go new file mode 100644 index 000000000..856b07b04 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provider.go @@ -0,0 +1,442 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliep. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podman + +import ( + "encoding/json" + "fmt" + "net" + "os" + "path/filepath" + "strconv" + "strings" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + "sigs.k8s.io/kind/pkg/log" + + internallogs "sigs.k8s.io/kind/pkg/cluster/internal/logs" + "sigs.k8s.io/kind/pkg/cluster/internal/providers" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" + "sigs.k8s.io/kind/pkg/internal/sets" + "sigs.k8s.io/kind/pkg/internal/version" +) + +// NewProvider returns a new provider based on executing `podman ...` +func NewProvider(logger log.Logger) providers.Provider { + logger.Warn("enabling experimental podman provider") + return &provider{ + logger: logger, + } +} + +// Provider implements provider.Provider +// see NewProvider +type provider struct { + logger log.Logger + info *providers.ProviderInfo +} + +// String implements fmt.Stringer +// NOTE: the value of this should not currently be relied upon for anything! +// This is only used for setting the Node's providerID +func (p *provider) String() string { + return "podman" +} + +// Provision is part of the providers.Provider interface +func (p *provider) Provision(status *cli.Status, cfg *config.Cluster) (err error) { + if err := ensureMinVersion(); err != nil { + return err + } + + // TODO: validate cfg + // ensure node images are pulled before actually provisioning + if err := ensureNodeImages(p.logger, status, cfg); err != nil { + return err + } + + // ensure the pre-requisite network exists + networkName := fixedNetworkName + if n := os.Getenv("KIND_EXPERIMENTAL_PODMAN_NETWORK"); n != "" { + p.logger.Warn("WARNING: Overriding podman network due to KIND_EXPERIMENTAL_PODMAN_NETWORK") + p.logger.Warn("WARNING: Here be dragons! 
This is not supported currently.") + networkName = n + } + if err := ensureNetwork(networkName); err != nil { + return errors.Wrap(err, "failed to ensure podman network") + } + + // actually provision the cluster + icons := strings.Repeat("📦 ", len(cfg.Nodes)) + status.Start(fmt.Sprintf("Preparing nodes %s", icons)) + defer func() { status.End(err == nil) }() + + // plan creating the containers + createContainerFuncs, err := planCreation(cfg, networkName) + if err != nil { + return err + } + + // actually create nodes + return errors.UntilErrorConcurrent(createContainerFuncs) +} + +// ListClusters is part of the providers.Provider interface +func (p *provider) ListClusters() ([]string, error) { + cmd := exec.Command("podman", + "ps", + "-a", // show stopped nodes + // filter for nodes with the cluster label + "--filter", "label="+clusterLabelKey, + // format to include the cluster name + "--format", fmt.Sprintf(`{{index .Labels "%s"}}`, clusterLabelKey), + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to list clusters") + } + return sets.NewString(lines...).List(), nil +} + +// ListNodes is part of the providers.Provider interface +func (p *provider) ListNodes(cluster string) ([]nodes.Node, error) { + cmd := exec.Command("podman", + "ps", + "-a", // show stopped nodes + // filter for nodes with the cluster label + "--filter", fmt.Sprintf("label=%s=%s", clusterLabelKey, cluster), + // format to include the cluster name + "--format", `{{.Names}}`, + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, errors.Wrap(err, "failed to list nodes") + } + // convert names to node handles + ret := make([]nodes.Node, 0, len(lines)) + for _, name := range lines { + ret = append(ret, p.node(name)) + } + return ret, nil +} + +// DeleteNodes is part of the providers.Provider interface +func (p *provider) DeleteNodes(n []nodes.Node) error { + if len(n) == 0 { + return nil + } + const command = "podman" + args := 
make([]string, 0, len(n)+3) // allocate once + args = append(args, + "rm", + "-f", // force the container to be delete now + "-v", // delete volumes + ) + for _, node := range n { + args = append(args, node.String()) + } + if err := exec.Command(command, args...).Run(); err != nil { + return errors.Wrap(err, "failed to delete nodes") + } + var nodeVolumes []string + for _, node := range n { + volumes, err := getVolumes(node.String()) + if err != nil { + return err + } + nodeVolumes = append(nodeVolumes, volumes...) + } + if len(nodeVolumes) == 0 { + return nil + } + return deleteVolumes(nodeVolumes) +} + +// GetAPIServerEndpoint is part of the providers.Provider interface +func (p *provider) GetAPIServerEndpoint(cluster string) (string, error) { + // locate the node that hosts this + allNodes, err := p.ListNodes(cluster) + if err != nil { + return "", errors.Wrap(err, "failed to list nodes") + } + n, err := nodeutils.APIServerEndpointNode(allNodes) + if err != nil { + return "", errors.Wrap(err, "failed to get api server endpoint") + } + + // TODO: get rid of this once podman settles on how to get the port mapping using podman inspect + // This is only used to get the Kubeconfig server field + v, err := getPodmanVersion() + if err != nil { + return "", errors.Wrap(err, "failed to check podman version") + } + // podman inspect was broken between 2.2.0 and 3.0.0 + // https://github.com/containers/podman/issues/8444 + if v.AtLeast(version.MustParseSemantic("2.2.0")) && + v.LessThan(version.MustParseSemantic("3.0.0")) { + p.logger.Warnf("WARNING: podman version %s not fully supported, please use versions 3.0.0+") + + cmd := exec.Command( + "podman", "inspect", + "--format", + "{{range .NetworkSettings.Ports }}{{range .}}{{.HostIP}}/{{.HostPort}}{{end}}{{end}}", + n.String(), + ) + + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get api server port") + } + if len(lines) != 1 { + return "", errors.Errorf("network details 
should only be one line, got %d lines", len(lines)) + } + // output is in the format IP/Port + parts := strings.Split(strings.TrimSpace(lines[0]), "/") + if len(parts) != 2 { + return "", errors.Errorf("network details should be in the format IP/Port, received: %s", parts) + } + host := parts[0] + port, err := strconv.Atoi(parts[1]) + if err != nil { + return "", errors.Errorf("network port not an integer: %v", err) + } + + return net.JoinHostPort(host, strconv.Itoa(port)), nil + } + + cmd := exec.Command( + "podman", "inspect", + "--format", + "{{ json .NetworkSettings.Ports }}", + n.String(), + ) + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get api server port") + } + if len(lines) != 1 { + return "", errors.Errorf("network details should only be one line, got %d lines", len(lines)) + } + + // portMapping19 maps to the standard CNI portmapping capability used in podman 1.9 + // see: https://github.com/containernetworking/cni/blob/spec-v0.4.0/CONVENTIONS.md + type portMapping19 struct { + HostPort int32 `json:"hostPort"` + ContainerPort int32 `json:"containerPort"` + Protocol string `json:"protocol"` + HostIP string `json:"hostIP"` + } + // portMapping20 maps to the podman 2.0 portmap type + // see: https://github.com/containers/podman/blob/05988fc74fc25f2ad2256d6e011dfb7ad0b9a4eb/libpod/define/container_inspect.go#L134-L143 + type portMapping20 struct { + HostPort string `json:"HostPort"` + HostIP string `json:"HostIp"` + } + + portMappings20 := make(map[string][]portMapping20) + if err := json.Unmarshal([]byte(lines[0]), &portMappings20); err == nil { + for k, v := range portMappings20 { + protocol := "tcp" + parts := strings.Split(k, "/") + if len(parts) == 2 { + protocol = strings.ToLower(parts[1]) + } + containerPort, err := strconv.Atoi(parts[0]) + if err != nil { + return "", err + } + for _, pm := range v { + if containerPort == common.APIServerInternalPort && protocol == "tcp" { + return 
net.JoinHostPort(pm.HostIP, pm.HostPort), nil + } + } + } + } + + var portMappings19 []portMapping19 + if err := json.Unmarshal([]byte(lines[0]), &portMappings19); err != nil { + return "", errors.Errorf("invalid network details: %v", err) + } + for _, pm := range portMappings19 { + if pm.ContainerPort == common.APIServerInternalPort && pm.Protocol == "tcp" { + return net.JoinHostPort(pm.HostIP, strconv.Itoa(int(pm.HostPort))), nil + } + } + + return "", errors.Errorf("failed to get api server port") +} + +// GetAPIServerInternalEndpoint is part of the providers.Provider interface +func (p *provider) GetAPIServerInternalEndpoint(cluster string) (string, error) { + // locate the node that hosts this + allNodes, err := p.ListNodes(cluster) + if err != nil { + return "", errors.Wrap(err, "failed to list nodes") + } + n, err := nodeutils.APIServerEndpointNode(allNodes) + if err != nil { + return "", errors.Wrap(err, "failed to get apiserver endpoint") + } + // NOTE: we're using the nodes's hostnames which are their names + return net.JoinHostPort(n.String(), fmt.Sprintf("%d", common.APIServerInternalPort)), nil +} + +// node returns a new node handle for this provider +func (p *provider) node(name string) nodes.Node { + return &node{ + name: name, + } +} + +// CollectLogs will populate dir with cluster logs and other debug files +func (p *provider) CollectLogs(dir string, nodes []nodes.Node) error { + execToPathFn := func(cmd exec.Cmd, path string) func() error { + return func() error { + f, err := common.FileOnHost(path) + if err != nil { + return err + } + defer f.Close() + return cmd.SetStdout(f).SetStderr(f).Run() + } + } + // construct a slice of methods to collect logs + fns := []func() error{ + // record info about the host podman + execToPathFn( + exec.Command("podman", "info"), + filepath.Join(dir, "podman-info.txt"), + ), + } + + // collect /var/log for each node and plan collecting more logs + var errs []error + for _, n := range nodes { + node := n // 
https://golang.org/doc/faq#closures_and_goroutines + name := node.String() + path := filepath.Join(dir, name) + if err := internallogs.DumpDir(p.logger, node, "/var/log", path); err != nil { + errs = append(errs, err) + } + + fns = append(fns, + func() error { return common.CollectLogs(node, path) }, + execToPathFn(exec.Command("podman", "inspect", name), filepath.Join(path, "inspect.json")), + func() error { + f, err := common.FileOnHost(filepath.Join(path, "serial.log")) + if err != nil { + return err + } + return node.SerialLogs(f) + }, + ) + } + + // run and collect up all errors + errs = append(errs, errors.AggregateConcurrent(fns)) + return errors.NewAggregate(errs) +} + +// Info returns the provider info. +// The info is cached on the first time of the execution. +func (p *provider) Info() (*providers.ProviderInfo, error) { + if p.info == nil { + var err error + p.info, err = info(p.logger) + if err != nil { + return p.info, err + } + } + return p.info, nil +} + +// podmanInfo corresponds to `podman info --format 'json`. +// The structure is different from `docker info --format '{{json .}}'`, +// and lacks information about the availability of the cgroup controllers. +type podmanInfo struct { + Host struct { + CgroupVersion string `json:"cgroupVersion,omitempty"` // "v2" + CgroupControllers []string `json:"cgroupControllers,omitempty"` + Security struct { + Rootless bool `json:"rootless,omitempty"` + } `json:"security"` + } `json:"host"` +} + +// info detects ProviderInfo by executing `podman info --format json`. +func info(logger log.Logger) (*providers.ProviderInfo, error) { + const podman = "podman" + args := []string{"info", "--format", "json"} + cmd := exec.Command(podman, args...) 
+ out, err := exec.Output(cmd) + if err != nil { + return nil, errors.Wrapf(err, "failed to get podman info (%s %s): %q", + podman, strings.Join(args, " "), string(out)) + } + var pInfo podmanInfo + if err := json.Unmarshal(out, &pInfo); err != nil { + return nil, err + } + stringSliceContains := func(s []string, str string) bool { + for _, v := range s { + if v == str { + return true + } + } + return false + } + + // Since Podman version before v4.0.0 does not gives controller info. + // We assume all the cgroup controllers to be available. + // For rootless, this assumption is not always correct, + // so we print the warning below. + cgroupSupportsMemoryLimit := true + cgroupSupportsPidsLimit := true + cgroupSupportsCPUShares := true + + v, err := getPodmanVersion() + if err != nil { + return nil, errors.Wrap(err, "failed to check podman version") + } + // Info for controllers must be available after v4.0.0 + // via https://github.com/containers/podman/pull/10387 + if v.AtLeast(version.MustParseSemantic("4.0.0")) { + cgroupSupportsMemoryLimit = stringSliceContains(pInfo.Host.CgroupControllers, "memory") + cgroupSupportsPidsLimit = stringSliceContains(pInfo.Host.CgroupControllers, "pids") + cgroupSupportsCPUShares = stringSliceContains(pInfo.Host.CgroupControllers, "cpu") + } + + info := &providers.ProviderInfo{ + Rootless: pInfo.Host.Security.Rootless, + Cgroup2: pInfo.Host.CgroupVersion == "v2", + SupportsMemoryLimit: cgroupSupportsMemoryLimit, + SupportsPidsLimit: cgroupSupportsPidsLimit, + SupportsCPUShares: cgroupSupportsCPUShares, + } + if info.Rootless && !v.AtLeast(version.MustParseSemantic("4.0.0")) { + if logger != nil { + logger.Warn("Cgroup controller detection is not implemented for Podman. 
" + + "If you see cgroup-related errors, you might need to set systemd property \"Delegate=yes\", see https://kind.sigs.k8s.io/docs/user/rootless/") + } + } + return info, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go new file mode 100644 index 000000000..c240a2929 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go @@ -0,0 +1,436 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podman + +import ( + "context" + "encoding/json" + "fmt" + "net" + "path/filepath" + "strings" + "time" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + + "sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/common" + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// planCreation creates a slice of funcs that will create the containers +func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs []func() error, err error) { + // these apply to all container creation + nodeNamer := common.MakeNodeNamer(cfg.Name) + names := make([]string, len(cfg.Nodes)) + for i, node := range cfg.Nodes { + name := nodeNamer(string(node.Role)) // name the node + names[i] = name + } + haveLoadbalancer := config.ClusterHasImplicitLoadBalancer(cfg) + if haveLoadbalancer { + names = append(names, nodeNamer(constants.ExternalLoadBalancerNodeRoleValue)) + } + genericArgs, err := commonArgs(cfg, networkName, names) + if err != nil { + return nil, err + } + + // only the external LB should reflect the port if we have multiple control planes + apiServerPort := cfg.Networking.APIServerPort + apiServerAddress := cfg.Networking.APIServerAddress + if config.ClusterHasImplicitLoadBalancer(cfg) { + // TODO: picking ports locally is less than ideal with a remote runtime + // (does podman have this?) + // but this is supposed to be an implementation detail and NOT picking + // them breaks host reboot ... 
+ // For now remote podman + multi control plane is not supported + apiServerPort = 0 // replaced with random ports + apiServerAddress = "127.0.0.1" // only the LB needs to be non-local + // only for IPv6 only clusters + if cfg.Networking.IPFamily == config.IPv6Family { + apiServerAddress = "::1" // only the LB needs to be non-local + } + // plan loadbalancer node + name := names[len(names)-1] + createContainerFuncs = append(createContainerFuncs, func() error { + args, err := runArgsForLoadBalancer(cfg, name, genericArgs) + if err != nil { + return err + } + return createContainer(name, args) + }) + } + + // plan normal nodes + for i, node := range cfg.Nodes { + node := node.DeepCopy() // copy so we can modify + name := names[i] + + // fixup relative paths, podman can only handle absolute paths + for i := range node.ExtraMounts { + hostPath := node.ExtraMounts[i].HostPath + absHostPath, err := filepath.Abs(hostPath) + if err != nil { + return nil, errors.Wrapf(err, "unable to resolve absolute path for hostPath: %q", hostPath) + } + node.ExtraMounts[i].HostPath = absHostPath + } + + // plan actual creation based on role + switch node.Role { + case config.ControlPlaneRole: + createContainerFuncs = append(createContainerFuncs, func() error { + node.ExtraPortMappings = append(node.ExtraPortMappings, + config.PortMapping{ + ListenAddress: apiServerAddress, + HostPort: apiServerPort, + ContainerPort: common.APIServerInternalPort, + }, + ) + args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs) + if err != nil { + return err + } + return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args) + }) + case config.WorkerRole: + createContainerFuncs = append(createContainerFuncs, func() error { + args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs) + if err != nil { + return err + } + return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args) + }) + default: + return nil, errors.Errorf("unknown 
node role: %q", node.Role) + } + } + return createContainerFuncs, nil +} + +// commonArgs computes static arguments that apply to all containers +func commonArgs(cfg *config.Cluster, networkName string, nodeNames []string) ([]string, error) { + // standard arguments all nodes containers need, computed once + args := []string{ + "--detach", // run the container detached + "--tty", // allocate a tty for entrypoint logs + "--net", networkName, // attach to its own network + // label the node with the cluster ID + "--label", fmt.Sprintf("%s=%s", clusterLabelKey, cfg.Name), + // specify container implementation to systemd + "-e", "container=podman", + // this is the default in cgroupsv2 but not in v1 + "--cgroupns=private", + } + + // enable IPv6 if necessary + if config.ClusterHasIPv6(cfg) { + args = append(args, "--sysctl=net.ipv6.conf.all.disable_ipv6=0", "--sysctl=net.ipv6.conf.all.forwarding=1") + } + + // pass proxy environment variables + proxyEnv, err := getProxyEnv(cfg, networkName, nodeNames) + if err != nil { + return nil, errors.Wrap(err, "proxy setup error") + } + for key, val := range proxyEnv { + args = append(args, "-e", fmt.Sprintf("%s=%s", key, val)) + } + + // handle Podman on Btrfs or ZFS same as we do with Docker + // https://github.com/kubernetes-sigs/kind/issues/1416#issuecomment-606514724 + if mountDevMapper() { + args = append(args, "--volume", "/dev/mapper:/dev/mapper") + } + + // rootless: use fuse-overlayfs by default + // https://github.com/kubernetes-sigs/kind/issues/2275 + if mountFuse() { + args = append(args, "--device", "/dev/fuse") + } + + if cfg.Networking.DNSSearch != nil { + args = append(args, "-e", "KIND_DNS_SEARCH="+strings.Join(*cfg.Networking.DNSSearch, " ")) + } + + return args, nil +} + +func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, name string, args []string) ([]string, error) { + // Pre-create anonymous volumes to enable specifying mount options + // during container run time + varVolume, 
err := createAnonymousVolume(name) + if err != nil { + return nil, err + } + + args = append([]string{ + "--hostname", name, // make hostname match container name + // label the node with the role ID + "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, node.Role), + // running containers in a container requires privileged + // NOTE: we could try to replicate this with --cap-add, and use less + // privileges, but this flag also changes some mounts that are necessary + // including some ones podman would otherwise do by default. + // for now this is what we want. in the future we may revisit this. + "--privileged", + // runtime temporary storage + "--tmpfs", "/tmp", // various things depend on working /tmp + "--tmpfs", "/run", // systemd wants a writable /run + // runtime persistent storage + // this ensures that E.G. pods, logs etc. are not on the container + // filesystem, which is not only better for performance, but allows + // running kind in kind for "party tricks" + // (please don't depend on doing this though!) + // also enable default docker volume options + // suid: SUID applications on the volume will be able to change their privilege + // exec: executables on the volume will be able to executed within the container + // dev: devices on the volume will be able to be used by processes within the container + "--volume", fmt.Sprintf("%s:/var:suid,exec,dev", varVolume), + // some k8s things want to read /lib/modules + "--volume", "/lib/modules:/lib/modules:ro", + // propagate KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER to the entrypoint script + "-e", "KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER", + }, + args..., + ) + + // convert mounts and port mappings to container run args + args = append(args, generateMountBindings(node.ExtraMounts...)...) + mappingArgs, err := generatePortMappings(clusterIPFamily, node.ExtraPortMappings...) + if err != nil { + return nil, err + } + args = append(args, mappingArgs...) 
+ + switch node.Role { + case config.ControlPlaneRole: + args = append(args, "-e", "KUBECONFIG=/etc/kubernetes/admin.conf") + } + + // finally, specify the image to run + _, image := sanitizeImage(node.Image) + return append(args, image), nil +} + +func runArgsForLoadBalancer(cfg *config.Cluster, name string, args []string) ([]string, error) { + args = append([]string{ + "--hostname", name, // make hostname match container name + // label the node with the role ID + "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, constants.ExternalLoadBalancerNodeRoleValue), + }, + args..., + ) + + // load balancer port mapping + mappingArgs, err := generatePortMappings(cfg.Networking.IPFamily, + config.PortMapping{ + ListenAddress: cfg.Networking.APIServerAddress, + HostPort: cfg.Networking.APIServerPort, + ContainerPort: common.APIServerInternalPort, + }, + ) + if err != nil { + return nil, err + } + args = append(args, mappingArgs...) + + // finally, specify the image to run + _, image := sanitizeImage(loadbalancer.Image) + return append(args, image), nil +} + +func getProxyEnv(cfg *config.Cluster, networkName string, nodeNames []string) (map[string]string, error) { + envs := common.GetProxyEnvs(cfg) + // Specifically add the podman network subnets to NO_PROXY if we are using a proxy + if len(envs) > 0 { + // kind default bridge is "kind" + subnets, err := getSubnets(networkName) + if err != nil { + return nil, err + } + noProxyList := append(subnets, envs[common.NOProxy]) + noProxyList = append(noProxyList, nodeNames...) + // Add pod,service and all the cluster nodes' dns names to no_proxy to allow in cluster + // Note: this is best effort based on the default CoreDNS spec + // https://github.com/kubernetes/dns/blob/master/docs/specification.md + // Any user created pod/service hostnames, namespaces, custom DNS services + // are expected to be no-proxied by the user explicitly. 
+ + noProxyList = append(noProxyList, ".svc", ".svc.cluster", ".svc.cluster.local") + noProxyJoined := strings.Join(noProxyList, ",") + envs[common.NOProxy] = noProxyJoined + envs[strings.ToLower(common.NOProxy)] = noProxyJoined + } + return envs, nil +} + +type podmanNetworks []struct { + // v4+ + Subnets []struct { + Subnet string `json:"subnet"` + Gateway string `json:"gateway"` + } `json:"subnets"` + // v3 and anything still using CNI/IPAM + Plugins []struct { + Ipam struct { + Ranges [][]struct { + Gateway string `json:"gateway"` + Subnet string `json:"subnet"` + } `json:"ranges"` + } `json:"ipam,omitempty"` + } `json:"plugins"` +} + +func getSubnets(networkName string) ([]string, error) { + cmd := exec.Command("podman", "network", "inspect", networkName) + out, err := exec.Output(cmd) + + if err != nil { + return nil, errors.Wrap(err, "failed to get subnets") + } + + networks := podmanNetworks{} + jsonErr := json.Unmarshal([]byte(out), &networks) + if jsonErr != nil { + return nil, errors.Wrap(jsonErr, "failed to get subnets") + } + subnets := []string{} + for _, network := range networks { + if len(network.Subnets) > 0 { + for _, subnet := range network.Subnets { + subnets = append(subnets, subnet.Subnet) + } + } + if len(network.Plugins) > 0 { + for _, plugin := range network.Plugins { + for _, r := range plugin.Ipam.Ranges { + for _, rr := range r { + subnets = append(subnets, rr.Subnet) + } + } + } + } + } + return subnets, nil +} + +// generateMountBindings converts the mount list to a list of args for podman +// ':[:options]', where 'options' +// is a comma-separated list of the following strings: +// 'ro', if the path is read only +// 'Z', if the volume requires SELinux relabeling +func generateMountBindings(mounts ...config.Mount) []string { + args := make([]string, 0, len(mounts)) + for _, m := range mounts { + bind := fmt.Sprintf("%s:%s", m.HostPath, m.ContainerPath) + var attrs []string + if m.Readonly { + attrs = append(attrs, "ro") + } + // Only 
request relabeling if the pod provides an SELinux context. If the pod + // does not provide an SELinux context relabeling will label the volume with + // the container's randomly allocated MCS label. This would restrict access + // to the volume to the container which mounts it first. + if m.SelinuxRelabel { + attrs = append(attrs, "Z") + } + switch m.Propagation { + case config.MountPropagationNone: + // noop, private is default + case config.MountPropagationBidirectional: + attrs = append(attrs, "rshared") + case config.MountPropagationHostToContainer: + attrs = append(attrs, "rslave") + default: // Falls back to "private" + } + if len(attrs) > 0 { + bind = fmt.Sprintf("%s:%s", bind, strings.Join(attrs, ",")) + } + args = append(args, fmt.Sprintf("--volume=%s", bind)) + } + return args +} + +// generatePortMappings converts the portMappings list to a list of args for podman +func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings ...config.PortMapping) ([]string, error) { + args := make([]string, 0, len(portMappings)) + for _, pm := range portMappings { + // do provider internal defaulting + // in a future API revision we will handle this at the API level and remove this + if pm.ListenAddress == "" { + switch clusterIPFamily { + case config.IPv4Family, config.DualStackFamily: + pm.ListenAddress = "0.0.0.0" + case config.IPv6Family: + pm.ListenAddress = "::" + default: + return nil, errors.Errorf("unknown cluster IP family: %v", clusterIPFamily) + } + } + if string(pm.Protocol) == "" { + pm.Protocol = config.PortMappingProtocolTCP // TCP is the default + } + + // validate that the provider can handle this binding + switch pm.Protocol { + case config.PortMappingProtocolTCP: + case config.PortMappingProtocolUDP: + case config.PortMappingProtocolSCTP: + default: + return nil, errors.Errorf("unknown port mapping protocol: %v", pm.Protocol) + } + + // get a random port if necessary (port = 0) + hostPort, err := 
common.PortOrGetFreePort(pm.HostPort, pm.ListenAddress) + if err != nil { + return nil, errors.Wrap(err, "failed to get random host port for port mapping") + } + + // generate the actual mapping arg + protocol := string(pm.Protocol) + hostPortBinding := net.JoinHostPort(pm.ListenAddress, fmt.Sprintf("%d", hostPort)) + // Podman expects empty string instead of 0 to assign a random port + // https://github.com/containers/libpod/blob/master/pkg/spec/ports.go#L68-L69 + if strings.HasSuffix(hostPortBinding, ":0") { + hostPortBinding = strings.TrimSuffix(hostPortBinding, "0") + } + args = append(args, fmt.Sprintf("--publish=%s:%d/%s", hostPortBinding, pm.ContainerPort, strings.ToLower(protocol))) + } + return args, nil +} + +func createContainer(name string, args []string) error { + if err := exec.Command("podman", append([]string{"run", "--name", name}, args...)...).Run(); err != nil { + return err + } + return nil +} + +func createContainerWithWaitUntilSystemdReachesMultiUserSystem(name string, args []string) error { + if err := exec.Command("podman", append([]string{"run", "--name", name}, args...)...).Run(); err != nil { + return err + } + + logCtx, logCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer logCancel() + logCmd := exec.CommandContext(logCtx, "podman", "logs", "-f", name) + return common.WaitUntilLogRegexpMatches(logCtx, logCmd, common.NodeReachedCgroupsReadyRegexp()) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/util.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/util.go new file mode 100644 index 000000000..969f09ccb --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/util.go @@ -0,0 +1,169 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podman + +import ( + "encoding/json" + "fmt" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" + + "sigs.k8s.io/kind/pkg/internal/version" +) + +// IsAvailable checks if podman is available in the system +func IsAvailable() bool { + cmd := exec.Command("podman", "-v") + lines, err := exec.OutputLines(cmd) + if err != nil || len(lines) != 1 { + return false + } + return strings.HasPrefix(lines[0], "podman version") +} + +func getPodmanVersion() (*version.Version, error) { + cmd := exec.Command("podman", "--version") + lines, err := exec.OutputLines(cmd) + if err != nil { + return nil, err + } + + // output is like `podman version 1.7.1-dev` + if len(lines) != 1 { + return nil, errors.Errorf("podman version should only be one line, got %d", len(lines)) + } + parts := strings.Split(lines[0], " ") + if len(parts) != 3 { + return nil, errors.Errorf("podman --version contents should have 3 parts, got %q", lines[0]) + } + return version.ParseSemantic(parts[2]) +} + +const ( + minSupportedVersion = "1.8.0" +) + +func ensureMinVersion() error { + // ensure that podman version is a compatible version + v, err := getPodmanVersion() + if err != nil { + return errors.Wrap(err, "failed to check podman version") + } + if !v.AtLeast(version.MustParseSemantic(minSupportedVersion)) { + return errors.Errorf("podman version %q is too old, please upgrade to %q or later", v, minSupportedVersion) + } + return nil +} + +// createAnonymousVolume creates a new anonymous volume +// with the specified label=true +// returns the name of the volume created 
+func createAnonymousVolume(label string) (string, error) { + cmd := exec.Command("podman", + "volume", + "create", + // podman only support filter on key during list + // so we use the unique id as key + "--label", fmt.Sprintf("%s=true", label)) + name, err := exec.Output(cmd) + if err != nil { + return "", err + } + return strings.TrimSuffix(string(name), "\n"), nil +} + +// getVolumes gets volume names filtered on specified label +func getVolumes(label string) ([]string, error) { + cmd := exec.Command("podman", + "volume", + "ls", + "--filter", fmt.Sprintf("label=%s", label), + "--quiet") + // `output` from the above command is names of all volumes each followed by `\n`. + output, err := exec.Output(cmd) + if err != nil { + return nil, err + } + if string(output) == "" { + // no volumes + return nil, nil + } + // Trim away the last `\n`. + trimmedOutput := strings.TrimSuffix(string(output), "\n") + // Get names of all volumes by splitting via `\n`. + return strings.Split(string(trimmedOutput), "\n"), nil +} + +func deleteVolumes(names []string) error { + args := []string{ + "volume", + "rm", + "--force", + } + args = append(args, names...) + cmd := exec.Command("podman", args...) 
+ return cmd.Run() +} + +// mountDevMapper checks if the podman storage driver is Btrfs or ZFS +func mountDevMapper() bool { + cmd := exec.Command("podman", "info", "--format", "json") + out, err := exec.Output(cmd) + if err != nil { + return false + } + + var pInfo podmanStorageInfo + if err := json.Unmarshal(out, &pInfo); err != nil { + return false + } + + // match docker logic pkg/cluster/internal/providers/docker/util.go + if pInfo.Store.GraphDriverName == "btrfs" || + pInfo.Store.GraphDriverName == "zfs" || + pInfo.Store.GraphDriverName == "devicemapper" || + pInfo.Store.GraphStatus.BackingFilesystem == "btrfs" || + pInfo.Store.GraphStatus.BackingFilesystem == "xfs" || + pInfo.Store.GraphStatus.BackingFilesystem == "zfs" { + return true + } + return false +} + +type podmanStorageInfo struct { + Store struct { + GraphDriverName string `json:"graphDriverName,omitempty"` + GraphStatus struct { + BackingFilesystem string `json:"Backing Filesystem,omitempty"` // "v2" + } `json:"graphStatus"` + } `json:"store"` +} + +// rootless: use fuse-overlayfs by default +// https://github.com/kubernetes-sigs/kind/issues/2275 +func mountFuse() bool { + i, err := info(nil) + if err != nil { + return false + } + if i != nil && i.Rootless { + return true + } + return false +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/provider.go new file mode 100644 index 000000000..cc4270609 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/provider.go @@ -0,0 +1,59 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package providers + +import ( + "sigs.k8s.io/kind/pkg/cluster/nodes" + + "sigs.k8s.io/kind/pkg/internal/apis/config" + "sigs.k8s.io/kind/pkg/internal/cli" +) + +// Provider represents a provider of cluster / node infrastructure +// This is an alpha-grade internal API +type Provider interface { + // Provision should create and start the nodes, just short of + // actually starting up Kubernetes, based on the given cluster config + Provision(status *cli.Status, cfg *config.Cluster) error + // ListClusters discovers the clusters that currently have resources + // under this providers + ListClusters() ([]string, error) + // ListNodes returns the nodes under this provider for the given + // cluster name, they may or may not be running correctly + ListNodes(cluster string) ([]nodes.Node, error) + // DeleteNodes deletes the provided list of nodes + // These should be from results previously returned by this provider + // E.G. 
by ListNodes() + DeleteNodes([]nodes.Node) error + // GetAPIServerEndpoint returns the host endpoint for the cluster's API server + GetAPIServerEndpoint(cluster string) (string, error) + // GetAPIServerInternalEndpoint returns the internal network endpoint for the cluster's API server + GetAPIServerInternalEndpoint(cluster string) (string, error) + // CollectLogs will populate dir with cluster logs and other debug files + CollectLogs(dir string, nodes []nodes.Node) error + // Info returns the provider info + Info() (*ProviderInfo, error) +} + +// ProviderInfo is the info of the provider +type ProviderInfo struct { + Rootless bool + Cgroup2 bool + SupportsMemoryLimit bool + SupportsPidsLimit bool + SupportsCPUShares bool +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/doc.go new file mode 100644 index 000000000..4cd87b0ec --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package nodes provides a kind specific definition of a cluster node +package nodes diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/types.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/types.go new file mode 100644 index 000000000..a2c1b33b4 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/types.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodes + +import ( + "io" + + "sigs.k8s.io/kind/pkg/exec" +) + +// Node represents a kind cluster node +type Node interface { + // The node should implement exec.Cmder for running commands against the node + // see: sigs.k8s.io/kind/pkg/exec + exec.Cmder + // String should return the node name + String() string // see also: fmt.Stringer + // Role should return the node's role + Role() (string, error) // see also: pkg/cluster/constants + // TODO(bentheelder): should return node addresses more generally + // Possibly remove this method in favor of obtaining this detail with + // exec or from the provider + IP() (ipv4 string, ipv6 string, err error) + // SerialLogs collects the "node" container logs + SerialLogs(writer io.Writer) error +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/doc.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/doc.go new file mode 100644 index 000000000..b1e72e95a --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package nodeutils contains functionality for Kubernetes-in-Docker nodes +// It mostly exists to break up functionality from sigs.k8s.io/kind/pkg/cluster +package nodeutils diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/roles.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/roles.go new file mode 100644 index 000000000..27deb93cc --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/roles.go @@ -0,0 +1,153 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nodeutils + +import ( + "sort" + "strings" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" +) + +// SelectNodesByRole returns a list of nodes with the matching role +// TODO(bentheelder): remove this in favor of specific role select methods +// and avoid the unnecessary error handling +func SelectNodesByRole(allNodes []nodes.Node, role string) ([]nodes.Node, error) { + out := []nodes.Node{} + for _, node := range allNodes { + nodeRole, err := node.Role() + if err != nil { + return nil, err + } + if nodeRole == role { + out = append(out, node) + } + } + return out, nil +} + +// InternalNodes returns the list of container IDs for the "nodes" in the cluster +// that are ~Kubernetes nodes, as opposed to e.g. the external loadbalancer for HA +func InternalNodes(allNodes []nodes.Node) ([]nodes.Node, error) { + selectedNodes := []nodes.Node{} + for _, node := range allNodes { + nodeRole, err := node.Role() + if err != nil { + return nil, err + } + if nodeRole == constants.WorkerNodeRoleValue || nodeRole == constants.ControlPlaneNodeRoleValue { + selectedNodes = append(selectedNodes, node) + } + } + return selectedNodes, nil +} + +// ExternalLoadBalancerNode returns a node handle for the external control plane +// loadbalancer node or nil if there isn't one +func ExternalLoadBalancerNode(allNodes []nodes.Node) (nodes.Node, error) { + // identify and validate external load balancer node + loadBalancerNodes, err := SelectNodesByRole( + allNodes, + constants.ExternalLoadBalancerNodeRoleValue, + ) + if err != nil { + return nil, err + } + if len(loadBalancerNodes) < 1 { + return nil, nil + } + if len(loadBalancerNodes) > 1 { + return nil, errors.Errorf( + "unexpected number of %s nodes %d", + constants.ExternalLoadBalancerNodeRoleValue, + len(loadBalancerNodes), + ) + } + return loadBalancerNodes[0], nil +} + +// APIServerEndpointNode selects the node from allNodes which hosts the API Server 
endpoint +// This should be the control plane node if there is one control plane node, or a LoadBalancer otherwise. +// It returns an error if the node list is invalid (E.G. two control planes and no load balancer) +func APIServerEndpointNode(allNodes []nodes.Node) (nodes.Node, error) { + if n, err := ExternalLoadBalancerNode(allNodes); err != nil { + return nil, errors.Wrap(err, "failed to find api-server endpoint node") + } else if n != nil { + return n, nil + } + n, err := ControlPlaneNodes(allNodes) + if err != nil { + return nil, errors.Wrap(err, "failed to find api-server endpoint node") + } + if len(n) != 1 { + return nil, errors.Errorf("expected one control plane node or a load balancer, not %d and none", len(n)) + } + return n[0], nil +} + +// ControlPlaneNodes returns all control plane nodes such that the first entry +// is the bootstrap control plane node +func ControlPlaneNodes(allNodes []nodes.Node) ([]nodes.Node, error) { + controlPlaneNodes, err := SelectNodesByRole( + allNodes, + constants.ControlPlaneNodeRoleValue, + ) + if err != nil { + return nil, err + } + // pick the first by sorting + // TODO(bentheelder): perhaps in the future we should mark this node + // specially at container creation time + sort.Slice(controlPlaneNodes, func(i, j int) bool { + return strings.Compare(controlPlaneNodes[i].String(), controlPlaneNodes[j].String()) < 0 + }) + return controlPlaneNodes, nil +} + +// BootstrapControlPlaneNode returns a handle to the bootstrap control plane node +// TODO(bentheelder): remove this. 
This node shouldn't be special (fix that first) +func BootstrapControlPlaneNode(allNodes []nodes.Node) (nodes.Node, error) { + controlPlaneNodes, err := ControlPlaneNodes(allNodes) + if err != nil { + return nil, err + } + if len(controlPlaneNodes) < 1 { + return nil, errors.Errorf( + "expected at least one %s node", + constants.ControlPlaneNodeRoleValue, + ) + } + return controlPlaneNodes[0], nil +} + +// SecondaryControlPlaneNodes returns handles to the secondary +// control plane nodes and NOT the bootstrap control plane node +func SecondaryControlPlaneNodes(allNodes []nodes.Node) ([]nodes.Node, error) { + controlPlaneNodes, err := ControlPlaneNodes(allNodes) + if err != nil { + return nil, err + } + if len(controlPlaneNodes) < 1 { + return nil, errors.Errorf( + "expected at least one %s node", + constants.ControlPlaneNodeRoleValue, + ) + } + return controlPlaneNodes[1:], nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/util.go b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/util.go new file mode 100644 index 000000000..501681a2c --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/util.go @@ -0,0 +1,156 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nodeutils + +import ( + "bytes" + "encoding/json" + "io" + "path" + "strings" + + "github.com/pelletier/go-toml" + + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/exec" +) + +// KubeVersion returns the Kubernetes version installed on the node +func KubeVersion(n nodes.Node) (version string, err error) { + // grab kubernetes version from the node image + cmd := n.Command("cat", "/kind/version") + lines, err := exec.OutputLines(cmd) + if err != nil { + return "", errors.Wrap(err, "failed to get file") + } + if len(lines) != 1 { + return "", errors.Errorf("file should only be one line, got %d lines", len(lines)) + } + return lines[0], nil +} + +// WriteFile writes content to dest on the node +func WriteFile(n nodes.Node, dest, content string) error { + // create destination directory + err := n.Command("mkdir", "-p", path.Dir(dest)).Run() + if err != nil { + return errors.Wrapf(err, "failed to create directory %s", path.Dir(dest)) + } + + return n.Command("cp", "/dev/stdin", dest).SetStdin(strings.NewReader(content)).Run() +} + +// CopyNodeToNode copies file from a to b +func CopyNodeToNode(a, b nodes.Node, file string) error { + // create destination directory + err := b.Command("mkdir", "-p", path.Dir(file)).Run() + if err != nil { + return errors.Wrapf(err, "failed to create directory %q", path.Dir(file)) + } + + // TODO: experiment with streaming instead to avoid the copy + // for now we only use this for small files so it's not worth the complexity + var buff bytes.Buffer + if err := a.Command("cat", file).SetStdout(&buff).Run(); err != nil { + return errors.Wrapf(err, "failed to read %q from node", file) + } + if err := b.Command("cp", "/dev/stdin", file).SetStdin(&buff).Run(); err != nil { + return errors.Wrapf(err, "failed to write %q to node", file) + } + + return nil +} + +// LoadImageArchive loads image onto the node, where image is a Reader over an image archive +func LoadImageArchive(n 
nodes.Node, image io.Reader) error { + snapshotter, err := getSnapshotter(n) + if err != nil { + return err + } + cmd := n.Command("ctr", "--namespace=k8s.io", "images", "import", "--all-platforms", "--digests", "--snapshotter="+snapshotter, "-").SetStdin(image) + if err := cmd.Run(); err != nil { + return errors.Wrap(err, "failed to load image") + } + return nil +} + +func getSnapshotter(n nodes.Node) (string, error) { + out, err := exec.Output(n.Command("containerd", "config", "dump")) + if err != nil { + return "", errors.Wrap(err, "failed to detect containerd snapshotter") + } + return parseSnapshotter(string(out)) +} + +func parseSnapshotter(config string) (string, error) { + parsed, err := toml.Load(config) + if err != nil { + return "", errors.Wrap(err, "failed to detect containerd snapshotter") + } + snapshotter, ok := parsed.GetPath([]string{"plugins", "io.containerd.grpc.v1.cri", "containerd", "snapshotter"}).(string) + if !ok { + return "", errors.New("failed to detect containerd snapshotter") + } + return snapshotter, nil +} + +// ImageID returns ID of image on the node with the given image name if present +func ImageID(n nodes.Node, image string) (string, error) { + var out bytes.Buffer + if err := n.Command("crictl", "inspecti", image).SetStdout(&out).Run(); err != nil { + return "", err + } + // we only care about the image ID + crictlOut := struct { + Status struct { + ID string `json:"id"` + } `json:"status"` + }{} + if err := json.Unmarshal(out.Bytes(), &crictlOut); err != nil { + return "", err + } + return crictlOut.Status.ID, nil +} + +// ImageTags is used to perform a reverse lookup of the ImageID to list set of available +// RepoTags corresponding to the ImageID in question +func ImageTags(n nodes.Node, imageID string) (map[string]bool, error) { + var out bytes.Buffer + tags := make(map[string]bool, 0) + if err := n.Command("crictl", "inspecti", imageID).SetStdout(&out).Run(); err != nil { + return tags, err + } + crictlOut := struct { + 
Status struct { + RepoTags []string `json:"repoTags"` + } `json:"status"` + }{} + if err := json.Unmarshal(out.Bytes(), &crictlOut); err != nil { + return tags, err + } + for _, tag := range crictlOut.Status.RepoTags { + tags[tag] = true + } + return tags, nil +} + +// ReTagImage is used to tag an ImageID with a custom tag specified by imageName parameter +func ReTagImage(n nodes.Node, imageID, imageName string) error { + var out bytes.Buffer + return n.Command("ctr", "--namespace=k8s.io", "images", "tag", "--force", imageID, imageName).SetStdout(&out).Run() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go b/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go new file mode 100644 index 000000000..3cff17478 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go @@ -0,0 +1,246 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + "os" + "path/filepath" + "sort" + + "sigs.k8s.io/kind/pkg/cmd/kind/version" + + "sigs.k8s.io/kind/pkg/cluster/constants" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/log" + + internalcreate "sigs.k8s.io/kind/pkg/cluster/internal/create" + internaldelete "sigs.k8s.io/kind/pkg/cluster/internal/delete" + "sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig" + internalproviders "sigs.k8s.io/kind/pkg/cluster/internal/providers" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/docker" + "sigs.k8s.io/kind/pkg/cluster/internal/providers/podman" +) + +// DefaultName is the default cluster name +const DefaultName = constants.DefaultClusterName + +// defaultName is a helper that given a name defaults it if unset +func defaultName(name string) string { + if name == "" { + name = DefaultName + } + return name +} + +// Provider is used to perform cluster operations +type Provider struct { + provider internalproviders.Provider + logger log.Logger +} + +// NewProvider returns a new provider based on the supplied options +func NewProvider(options ...ProviderOption) *Provider { + p := &Provider{ + logger: log.NoopLogger{}, + } + // Ensure we apply the logger options first, while maintaining the order + // otherwise. This way we can trivially init the internal provider with + // the logger. 
+ sort.SliceStable(options, func(i, j int) bool { + _, iIsLogger := options[i].(providerLoggerOption) + _, jIsLogger := options[j].(providerLoggerOption) + return iIsLogger && !jIsLogger + }) + for _, o := range options { + if o != nil { + o.apply(p) + } + } + + // ensure a provider if none was set + // NOTE: depends on logger being set (see sorting above) + if p.provider == nil { + // DetectNodeProvider does not fallback to allow callers to determine + // this behavior + // However for compatibility if the caller of NewProvider supplied no + // option and we autodetect internally, we default to the docker provider + // for fallback, to avoid a breaking change for now. + // This may change in the future. + // TODO: consider breaking this API for earlier errors. + providerOpt, _ := DetectNodeProvider() + if providerOpt == nil { + providerOpt = ProviderWithDocker() + } + providerOpt.apply(p) + } + return p +} + +// NoNodeProviderDetectedError indicates that we could not autolocate an available +// NodeProvider backend on the host +var NoNodeProviderDetectedError = errors.NewWithoutStack("failed to detect any supported node provider") + +// DetectNodeProvider allows callers to autodetect the node provider +// *without* fallback to the default. +// +// Pass the returned ProviderOption to NewProvider to pass the auto-detect Docker +// or Podman option explicitly (in the future there will be more options) +// +// NOTE: The kind *cli* also checks `KIND_EXPERIMENTAL_PROVIDER` for "podman" or +// "docker" currently and does not auto-detect / respects this if set. +// +// This will be replaced with some other mechanism in the future (likely when +// podman support is GA), in the meantime though your tool may wish to match this. +// +// In the future when this is not considered experimental, +// that logic will be in a public API as well. 
+func DetectNodeProvider() (ProviderOption, error) { + // auto-detect based on each node provider's IsAvailable() function + if docker.IsAvailable() { + return ProviderWithDocker(), nil + } + if podman.IsAvailable() { + return ProviderWithPodman(), nil + } + return nil, errors.WithStack(NoNodeProviderDetectedError) +} + +// ProviderOption is an option for configuring a provider +type ProviderOption interface { + apply(p *Provider) +} + +// providerLoggerOption is a trivial ProviderOption adapter +// we use a type specific to logging options so we can handle them first +type providerLoggerOption func(p *Provider) + +func (a providerLoggerOption) apply(p *Provider) { + a(p) +} + +var _ ProviderOption = providerLoggerOption(nil) + +// ProviderWithLogger configures the provider to use Logger logger +func ProviderWithLogger(logger log.Logger) ProviderOption { + return providerLoggerOption(func(p *Provider) { + p.logger = logger + }) +} + +// providerRuntimeOption is a trivial ProviderOption adapter +// we use a type specific to logging options so we can handle them first +type providerRuntimeOption func(p *Provider) + +func (a providerRuntimeOption) apply(p *Provider) { + a(p) +} + +var _ ProviderOption = providerRuntimeOption(nil) + +// ProviderWithDocker configures the provider to use docker runtime +func ProviderWithDocker() ProviderOption { + return providerRuntimeOption(func(p *Provider) { + p.provider = docker.NewProvider(p.logger) + }) +} + +// ProviderWithPodman configures the provider to use podman runtime +func ProviderWithPodman() ProviderOption { + return providerRuntimeOption(func(p *Provider) { + p.provider = podman.NewProvider(p.logger) + }) +} + +// Create provisions and starts a kubernetes-in-docker cluster +func (p *Provider) Create(name string, options ...CreateOption) error { + // apply options + opts := &internalcreate.ClusterOptions{ + NameOverride: name, + } + for _, o := range options { + if err := o.apply(opts); err != nil { + return err + } + } 
+ return internalcreate.Cluster(p.logger, p.provider, opts) +} + +// Delete tears down a kubernetes-in-docker cluster +func (p *Provider) Delete(name, explicitKubeconfigPath string) error { + return internaldelete.Cluster(p.logger, p.provider, defaultName(name), explicitKubeconfigPath) +} + +// List returns a list of clusters for which nodes exist +func (p *Provider) List() ([]string, error) { + return p.provider.ListClusters() +} + +// KubeConfig returns the KUBECONFIG for the cluster +// If internal is true, this will contain the internal IP etc. +// If internal is false, this will contain the host IP etc. +func (p *Provider) KubeConfig(name string, internal bool) (string, error) { + return kubeconfig.Get(p.provider, defaultName(name), !internal) +} + +// ExportKubeConfig exports the KUBECONFIG for the cluster, merging +// it into the selected file, following the rules from +// https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#config +// where explicitPath is the --kubeconfig value. +func (p *Provider) ExportKubeConfig(name string, explicitPath string, internal bool) error { + return kubeconfig.Export(p.provider, defaultName(name), explicitPath, !internal) +} + +// ListNodes returns the list of container IDs for the "nodes" in the cluster +func (p *Provider) ListNodes(name string) ([]nodes.Node, error) { + return p.provider.ListNodes(defaultName(name)) +} + +// ListInternalNodes returns the list of container IDs for the "nodes" in the cluster +// that are not external +func (p *Provider) ListInternalNodes(name string) ([]nodes.Node, error) { + n, err := p.provider.ListNodes(name) + if err != nil { + return nil, err + } + return nodeutils.InternalNodes(n) +} + +// CollectLogs will populate dir with cluster logs and other debug files +func (p *Provider) CollectLogs(name, dir string) error { + // TODO: should use ListNodes and Collect should handle nodes differently + // based on role ... 
+ n, err := p.ListInternalNodes(name) + if err != nil { + return err + } + // ensure directory + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return errors.Wrap(err, "failed to create logs directory") + } + // write kind version + if err := os.WriteFile( + filepath.Join(dir, "kind-version.txt"), + []byte(version.DisplayVersion()), + 0666, // match os.Create + ); err != nil { + return errors.Wrap(err, "failed to write kind-version.txt") + } + // collect and write cluster logs + return p.provider.CollectLogs(dir, n) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cmd/doc.go b/vendor/sigs.k8s.io/kind/pkg/cmd/doc.go new file mode 100644 index 000000000..ae20f68a7 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cmd/doc.go @@ -0,0 +1,15 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cmd provides helpers used by kind's commands / cli +package cmd diff --git a/vendor/sigs.k8s.io/kind/pkg/cmd/iostreams.go b/vendor/sigs.k8s.io/kind/pkg/cmd/iostreams.go new file mode 100644 index 000000000..2903235de --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cmd/iostreams.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "io" + "os" +) + +// IOStreams provides the standard names for iostreams. +// This is useful for embedding and for unit testing. +// Inconsistent and different names make it hard to read and review code +// This is based on cli-runtime, but just the nice type without the dependency +type IOStreams struct { + // In think, os.Stdin + In io.Reader + // Out think, os.Stdout + Out io.Writer + // ErrOut think, os.Stderr + ErrOut io.Writer +} + +// StandardIOStreams returns an IOStreams from os.Stdin, os.Stdout +func StandardIOStreams() IOStreams { + return IOStreams{ + In: os.Stdin, + Out: os.Stdout, + ErrOut: os.Stderr, + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go b/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go new file mode 100644 index 000000000..9f010fdda --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go @@ -0,0 +1,97 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package version implements the `version` command +package version + +import ( + "fmt" + "runtime" + + "github.com/spf13/cobra" + + "sigs.k8s.io/kind/pkg/cmd" + "sigs.k8s.io/kind/pkg/log" +) + +// Version returns the kind CLI Semantic Version +func Version() string { + v := versionCore + // add pre-release version info if we have it + if versionPreRelease != "" { + v += "-" + versionPreRelease + // If gitCommitCount was set, add to the pre-release version + if gitCommitCount != "" { + v += "." + gitCommitCount + } + // if commit was set, add the + + // we only do this for pre-release versions + if gitCommit != "" { + // NOTE: use 14 character short hash, like Kubernetes + v += "+" + truncate(gitCommit, 14) + } + } + return v +} + +// DisplayVersion is Version() display formatted, this is what the version +// subcommand prints +func DisplayVersion() string { + return "kind v" + Version() + " " + runtime.Version() + " " + runtime.GOOS + "/" + runtime.GOARCH +} + +// versionCore is the core portion of the kind CLI version per Semantic Versioning 2.0.0 +const versionCore = "0.20.0" + +// versionPreRelease is the base pre-release portion of the kind CLI version per +// Semantic Versioning 2.0.0 +const versionPreRelease = "" + +// gitCommitCount count the commits since the last release. +// It is injected at build time. +var gitCommitCount = "" + +// gitCommit is the commit used to build the kind binary, if available. +// It is injected at build time. 
+var gitCommit = "" + +// NewCommand returns a new cobra.Command for version +func NewCommand(logger log.Logger, streams cmd.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Args: cobra.NoArgs, + Use: "version", + Short: "Prints the kind CLI version", + Long: "Prints the kind CLI version", + RunE: func(cmd *cobra.Command, args []string) error { + if logger.V(0).Enabled() { + // if not -q / --quiet, show lots of info + fmt.Fprintln(streams.Out, DisplayVersion()) + } else { + // otherwise only show semver + fmt.Fprintln(streams.Out, Version()) + } + return nil + }, + } + return cmd +} + +func truncate(s string, maxLen int) string { + if len(s) < maxLen { + return s + } + return s[:maxLen] +} diff --git a/vendor/sigs.k8s.io/kind/pkg/cmd/logger.go b/vendor/sigs.k8s.io/kind/pkg/cmd/logger.go new file mode 100644 index 000000000..eaf0f944c --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/cmd/logger.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "io" + "os" + + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/internal/cli" + "sigs.k8s.io/kind/pkg/internal/env" +) + +// NewLogger returns the standard logger used by the kind CLI +// This logger writes to os.Stderr +func NewLogger() log.Logger { + var writer io.Writer = os.Stderr + if env.IsSmartTerminal(writer) { + writer = cli.NewSpinner(writer) + } + return cli.NewLogger(writer, 0) +} + +// ColorEnabled returns true if color is enabled for the logger +// this should be used to control output +func ColorEnabled(logger log.Logger) bool { + type maybeColorer interface { + ColorEnabled() bool + } + v, ok := logger.(maybeColorer) + return ok && v.ColorEnabled() +} diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/aggregate.go b/vendor/sigs.k8s.io/kind/pkg/errors/aggregate.go new file mode 100644 index 000000000..5258cd2e1 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/errors/aggregate.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package errors + +// NewAggregate is a k8s.io/apimachinery/pkg/util/errors.NewAggregate compatible wrapper +// note that while it returns a StackTrace wrapped Aggregate +// That has been Flattened and Reduced +func NewAggregate(errlist []error) error { + return WithStack( + reduce( + flatten( + newAggregate(errlist), + ), + ), + ) +} + +// Errors returns the deepest Aggregate in a Cause chain +func Errors(err error) []error { + var errors Aggregate + for { + if v, ok := err.(Aggregate); ok { + errors = v + } + if causerErr, ok := err.(Causer); ok { + err = causerErr.Cause() + } else { + break + } + } + if errors != nil { + return errors.Errors() + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/aggregate_forked.go b/vendor/sigs.k8s.io/kind/pkg/errors/aggregate_forked.go new file mode 100644 index 000000000..3e9ec30b4 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/errors/aggregate_forked.go @@ -0,0 +1,167 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "errors" + + "sigs.k8s.io/kind/pkg/internal/sets" +) + +/* + The contents of this file are lightly forked from k8s.io/apimachinery/pkg/util/errors + Forking makes kind easier to import, and this code is stable. + + Currently the only source changes are renaming some methods so as to not + export them. +*/ + +// Aggregate represents an object that contains multiple errors, but does not +// necessarily have singular semantic meaning. 
+// The aggregate can be used with `errors.Is()` to check for the occurrence of +// a specific error type. +// Errors.As() is not supported, because the caller presumably cares about a +// specific error of potentially multiple that match the given type. +// +// NOTE: this type is originally from k8s.io/apimachinery/pkg/util/errors.Aggregate +// Since it is an interface, you can use the implementing types interchangeably +type Aggregate interface { + error + Errors() []error + Is(error) bool +} + +func newAggregate(errlist []error) Aggregate { + if len(errlist) == 0 { + return nil + } + // In case of input error list contains nil + var errs []error + for _, e := range errlist { + if e != nil { + errs = append(errs, e) + } + } + if len(errs) == 0 { + return nil + } + return aggregate(errs) +} + +// flatten takes an Aggregate, which may hold other Aggregates in arbitrary +// nesting, and flattens them all into a single Aggregate, recursively. +func flatten(agg Aggregate) Aggregate { + result := []error{} + if agg == nil { + return nil + } + for _, err := range agg.Errors() { + if a, ok := err.(Aggregate); ok { + r := flatten(a) + if r != nil { + result = append(result, r.Errors()...) + } + } else { + if err != nil { + result = append(result, err) + } + } + } + return newAggregate(result) +} + +// reduce will return err or, if err is an Aggregate and only has one item, +// the first item in the aggregate. +func reduce(err error) error { + if agg, ok := err.(Aggregate); ok && err != nil { + switch len(agg.Errors()) { + case 1: + return agg.Errors()[0] + case 0: + return nil + } + } + return err +} + +// This helper implements the error and Errors interfaces. Keeping it private +// prevents people from making an aggregate of 0 errors, which is not +// an error, but does satisfy the error interface. +type aggregate []error + +// Error is part of the error interface. +func (agg aggregate) Error() string { + if len(agg) == 0 { + // This should never happen, really. 
+ return "" + } + if len(agg) == 1 { + return agg[0].Error() + } + seenerrs := sets.NewString() + result := "" + agg.visit(func(err error) bool { + msg := err.Error() + if seenerrs.Has(msg) { + return false + } + seenerrs.Insert(msg) + if len(seenerrs) > 1 { + result += ", " + } + result += msg + return false + }) + if len(seenerrs) == 1 { + return result + } + return "[" + result + "]" +} + +func (agg aggregate) Is(target error) bool { + return agg.visit(func(err error) bool { + return errors.Is(err, target) + }) +} + +func (agg aggregate) visit(f func(err error) bool) bool { + for _, err := range agg { + switch err := err.(type) { + case aggregate: + if match := err.visit(f); match { + return match + } + case Aggregate: + for _, nestedErr := range err.Errors() { + if match := f(nestedErr); match { + return match + } + } + default: + if match := f(err); match { + return match + } + } + } + + return false +} + +// Errors is part of the Aggregate interface. +func (agg aggregate) Errors() []error { + return []error(agg) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/concurrent.go b/vendor/sigs.k8s.io/kind/pkg/errors/concurrent.go new file mode 100644 index 000000000..1d90ad197 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/errors/concurrent.go @@ -0,0 +1,69 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package errors + +import ( + "sync" +) + +// UntilErrorConcurrent runs all funcs in separate goroutines, returning the +// first non-nil error returned from funcs, or nil if all funcs return nil +func UntilErrorConcurrent(funcs []func() error) error { + errCh := make(chan error, len(funcs)) + for _, f := range funcs { + f := f // capture f + go func() { + errCh <- f() + }() + } + for i := 0; i < len(funcs); i++ { + if err := <-errCh; err != nil { + return err + } + } + return nil +} + +// AggregateConcurrent runs fns concurrently, returning a NewAggregate if there are > 1 errors +func AggregateConcurrent(funcs []func() error) error { + // run all fns concurrently + ch := make(chan error, len(funcs)) + var wg sync.WaitGroup + for _, f := range funcs { + f := f // capture f + wg.Add(1) + go func() { + defer wg.Done() + ch <- f() + }() + } + wg.Wait() + close(ch) + // collect up and return errors + errs := []error{} + for err := range ch { + if err != nil { + errs = append(errs, err) + } + } + if len(errs) > 1 { + return NewAggregate(errs) + } else if len(errs) == 1 { + return errs[0] + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/doc.go b/vendor/sigs.k8s.io/kind/pkg/errors/doc.go new file mode 100644 index 000000000..c93a68db3 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package errors provides common utilities for dealing with errors +package errors diff --git a/vendor/sigs.k8s.io/kind/pkg/errors/errors.go b/vendor/sigs.k8s.io/kind/pkg/errors/errors.go new file mode 100644 index 000000000..98bc47bf2 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/errors/errors.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + stderrors "errors" + + pkgerrors "github.com/pkg/errors" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return pkgerrors.New(message) +} + +// NewWithoutStack is like new but does NOT wrap with a stack +// This is useful for exported errors +func NewWithoutStack(message string) error { + return stderrors.New(message) +} + +// Errorf formats according to a format specifier and returns the string as a +// value that satisfies error. Errorf also records the stack trace at the +// point it was called. +func Errorf(format string, args ...interface{}) error { + return pkgerrors.Errorf(format, args...) +} + +// Wrap returns an error annotating err with a stack trace at the point Wrap +// is called, and the supplied message. If err is nil, Wrap returns nil. 
+func Wrap(err error, message string) error { + return pkgerrors.Wrap(err, message) +} + +// Wrapf returns an error annotating err with a stack trace at the point Wrapf +// is called, and the format specifier. If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + return pkgerrors.Wrapf(err, format, args...) +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + return pkgerrors.WithStack(err) +} + +// Causer is an interface to github.com/pkg/errors error's Cause() wrapping +type Causer interface { + // Cause returns the underlying error + Cause() error +} + +// StackTracer is an interface to github.com/pkg/errors error's StackTrace() +type StackTracer interface { + // StackTrace returns the StackTrace ... + // TODO: return our own type instead? + // https://github.com/pkg/errors#roadmap + StackTrace() pkgerrors.StackTrace +} + +// StackTrace returns the deepest StackTrace in a Cause chain +// https://github.com/pkg/errors/issues/173 +func StackTrace(err error) pkgerrors.StackTrace { + var stackErr error + for { + if _, ok := err.(StackTracer); ok { + stackErr = err + } + if causerErr, ok := err.(Causer); ok { + err = causerErr.Cause() + } else { + break + } + } + if stackErr != nil { + return stackErr.(StackTracer).StackTrace() + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/default.go b/vendor/sigs.k8s.io/kind/pkg/exec/default.go new file mode 100644 index 000000000..98a215cdf --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/exec/default.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import "context" + +// DefaultCmder is a LocalCmder instance used for convenience, packages +// originally using os/exec.Command can instead use pkg/kind/exec.Command +// which forwards to this instance +// TODO(bentheelder): swap this for testing +// TODO(bentheelder): consider not using a global for this :^) +var DefaultCmder = &LocalCmder{} + +// Command is a convenience wrapper over DefaultCmder.Command +func Command(command string, args ...string) Cmd { + return DefaultCmder.Command(command, args...) +} + +// CommandContext is a convenience wrapper over DefaultCmder.CommandContext +func CommandContext(ctx context.Context, command string, args ...string) Cmd { + return DefaultCmder.CommandContext(ctx, command, args...) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/doc.go b/vendor/sigs.k8s.io/kind/pkg/exec/doc.go new file mode 100644 index 000000000..68214779b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/exec/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package exec contains an interface for executing commands, along with helpers +// TODO(bentheelder): add standardized timeout functionality & a default timeout +// so that commands cannot hang indefinitely (!) +package exec diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/helpers.go b/vendor/sigs.k8s.io/kind/pkg/exec/helpers.go new file mode 100644 index 000000000..e579589e8 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/exec/helpers.go @@ -0,0 +1,142 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + "bufio" + "bytes" + "io" + "os" + "strings" + + "github.com/alessio/shellescape" + + "sigs.k8s.io/kind/pkg/errors" +) + +// PrettyCommand takes arguments identical to Cmder.Command, +// it returns a pretty printed command that could be pasted into a shell +func PrettyCommand(name string, args ...string) string { + var out strings.Builder + out.WriteString(shellescape.Quote(name)) + for _, arg := range args { + out.WriteByte(' ') + out.WriteString(shellescape.Quote(arg)) + } + return out.String() +} + +// RunErrorForError returns a RunError if the error contains a RunError. 
+// Otherwise it returns nil +func RunErrorForError(err error) *RunError { + var runError *RunError + for { + if rErr, ok := err.(*RunError); ok { + runError = rErr + } + if causerErr, ok := err.(errors.Causer); ok { + err = causerErr.Cause() + } else { + break + } + } + return runError +} + +// CombinedOutputLines is like os/exec's cmd.CombinedOutput(), +// but over our Cmd interface, and instead of returning the byte buffer of +// stderr + stdout, it scans these for lines and returns a slice of output lines +func CombinedOutputLines(cmd Cmd) (lines []string, err error) { + var buff bytes.Buffer + cmd.SetStdout(&buff) + cmd.SetStderr(&buff) + err = cmd.Run() + scanner := bufio.NewScanner(&buff) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + return lines, err +} + +// OutputLines is like os/exec's cmd.Output(), +// but over our Cmd interface, and instead of returning the byte buffer of +// stdout, it scans these for lines and returns a slice of output lines +func OutputLines(cmd Cmd) (lines []string, err error) { + var buff bytes.Buffer + cmd.SetStdout(&buff) + err = cmd.Run() + scanner := bufio.NewScanner(&buff) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + return lines, err +} + +// Output is like os/exec's cmd.Output, but over our Cmd interface +func Output(cmd Cmd) ([]byte, error) { + var buff bytes.Buffer + cmd.SetStdout(&buff) + err := cmd.Run() + return buff.Bytes(), err +} + +// InheritOutput sets cmd's output to write to the current process's stdout and stderr +func InheritOutput(cmd Cmd) Cmd { + cmd.SetStderr(os.Stderr) + cmd.SetStdout(os.Stdout) + return cmd +} + +// RunWithStdoutReader runs cmd with stdout piped to readerFunc +func RunWithStdoutReader(cmd Cmd, readerFunc func(io.Reader) error) error { + pr, pw, err := os.Pipe() + if err != nil { + return err + } + cmd.SetStdout(pw) + + return errors.AggregateConcurrent([]func() error{ + func() error { + defer pr.Close() + return readerFunc(pr) + }, + func() 
error { + defer pw.Close() + return cmd.Run() + }, + }) +} + +// RunWithStdinWriter runs cmd with writerFunc piped to stdin +func RunWithStdinWriter(cmd Cmd, writerFunc func(io.Writer) error) error { + pr, pw, err := os.Pipe() + if err != nil { + return err + } + cmd.SetStdin(pr) + + return errors.AggregateConcurrent([]func() error{ + func() error { + defer pw.Close() + return writerFunc(pw) + }, + func() error { + defer pr.Close() + return cmd.Run() + }, + }) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/local.go b/vendor/sigs.k8s.io/kind/pkg/exec/local.go new file mode 100644 index 000000000..65bb73c9a --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/exec/local.go @@ -0,0 +1,157 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package exec + +import ( + "bytes" + "context" + "io" + osexec "os/exec" + "sync" + + "sigs.k8s.io/kind/pkg/errors" +) + +// LocalCmd wraps os/exec.Cmd, implementing the kind/pkg/exec.Cmd interface +type LocalCmd struct { + *osexec.Cmd +} + +var _ Cmd = &LocalCmd{} + +// LocalCmder is a factory for LocalCmd, implementing Cmder +type LocalCmder struct{} + +var _ Cmder = &LocalCmder{} + +// Command returns a new exec.Cmd backed by Cmd +func (c *LocalCmder) Command(name string, arg ...string) Cmd { + return &LocalCmd{ + Cmd: osexec.Command(name, arg...), + } +} + +// CommandContext is like Command but includes a context +func (c *LocalCmder) CommandContext(ctx context.Context, name string, arg ...string) Cmd { + return &LocalCmd{ + Cmd: osexec.CommandContext(ctx, name, arg...), + } +} + +// SetEnv sets env +func (cmd *LocalCmd) SetEnv(env ...string) Cmd { + cmd.Env = env + return cmd +} + +// SetStdin sets stdin +func (cmd *LocalCmd) SetStdin(r io.Reader) Cmd { + cmd.Stdin = r + return cmd +} + +// SetStdout set stdout +func (cmd *LocalCmd) SetStdout(w io.Writer) Cmd { + cmd.Stdout = w + return cmd +} + +// SetStderr sets stderr +func (cmd *LocalCmd) SetStderr(w io.Writer) Cmd { + cmd.Stderr = w + return cmd +} + +// Run runs the command +// If the returned error is non-nil, it should be of type *RunError +func (cmd *LocalCmd) Run() error { + // Background: + // Go's stdlib will setup and use a shared fd when cmd.Stderr == cmd.Stdout + // In any other case, it will use different fds, which will involve + // two different io.Copy goroutines writing to cmd.Stderr and cmd.Stdout + // + // Given this, we must synchronize capturing the output to a buffer + // IFF ! 
interfaceEqual(cmd.Sterr, cmd.Stdout) + var combinedOutput bytes.Buffer + var combinedOutputWriter io.Writer = &combinedOutput + if cmd.Stdout == nil && cmd.Stderr == nil { + // Case 1: If stdout and stderr are nil, we can just use the buffer + // The buffer will be == and Go will use one fd / goroutine + cmd.Stdout = combinedOutputWriter + cmd.Stderr = combinedOutputWriter + } else if interfaceEqual(cmd.Stdout, cmd.Stderr) { + // Case 2: If cmd.Stdout == cmd.Stderr go will still share the fd, + // but we need to wrap with a MultiWriter to respect the other writer + // and our buffer. + // The MultiWriter will be == and Go will use one fd / goroutine + cmd.Stdout = io.MultiWriter(cmd.Stdout, combinedOutputWriter) + cmd.Stderr = cmd.Stdout + } else { + // Case 3: If cmd.Stdout != cmd.Stderr, we need to synchronize the + // combined output writer. + // Go will use different fds / write routines for stdout and stderr + combinedOutputWriter = &mutexWriter{ + writer: &combinedOutput, + } + // wrap writers if non-nil + if cmd.Stdout != nil { + cmd.Stdout = io.MultiWriter(cmd.Stdout, combinedOutputWriter) + } else { + cmd.Stdout = combinedOutputWriter + } + if cmd.Stderr != nil { + cmd.Stderr = io.MultiWriter(cmd.Stderr, combinedOutputWriter) + } else { + cmd.Stderr = combinedOutputWriter + } + } + // TODO: should be in the caller or logger should be injected somehow ... + if err := cmd.Cmd.Run(); err != nil { + return errors.WithStack(&RunError{ + Command: cmd.Args, + Output: combinedOutput.Bytes(), + Inner: err, + }) + } + return nil +} + +// interfaceEqual protects against panics from doing equality tests on +// two interfaces with non-comparable underlying types. 
+// This trivial is borrowed from the go stdlib in os/exec +// Note that the recover will only happen if a is not comparable to b, +// in which case we'll return false +// We've lightly modified this to pass errcheck (explicitly ignoring recover) +func interfaceEqual(a, b interface{}) bool { + defer func() { + _ = recover() + }() + return a == b +} + +// mutexWriter is a simple synchronized wrapper around an io.Writer +type mutexWriter struct { + writer io.Writer + mu sync.Mutex +} + +func (m *mutexWriter) Write(b []byte) (int, error) { + m.mu.Lock() + defer m.mu.Unlock() + n, err := m.writer.Write(b) + return n, err +} diff --git a/vendor/sigs.k8s.io/kind/pkg/exec/types.go b/vendor/sigs.k8s.io/kind/pkg/exec/types.go new file mode 100644 index 000000000..4ce60431e --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/exec/types.go @@ -0,0 +1,70 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package exec + +import ( + "context" + "fmt" + "io" +) + +// Cmd abstracts over running a command somewhere, this is useful for testing +type Cmd interface { + // Run executes the command (like os/exec.Cmd.Run), it should return + // a *RunError if there is any error + Run() error + // Each entry should be of the form "key=value" + SetEnv(...string) Cmd + SetStdin(io.Reader) Cmd + SetStdout(io.Writer) Cmd + SetStderr(io.Writer) Cmd +} + +// Cmder abstracts over creating commands +type Cmder interface { + // command, args..., just like os/exec.Cmd + Command(string, ...string) Cmd + CommandContext(context.Context, string, ...string) Cmd +} + +// RunError represents an error running a Cmd +type RunError struct { + Command []string // [Name Args...] + Output []byte // Captured Stdout / Stderr of the command + Inner error // Underlying error if any +} + +var _ error = &RunError{} + +func (e *RunError) Error() string { + // TODO(BenTheElder): implement formatter, and show output for %+v ? + return fmt.Sprintf("command \"%s\" failed with error: %v", e.PrettyCommand(), e.Inner) +} + +// PrettyCommand pretty prints the command in a way that could be pasted +// into a shell +func (e *RunError) PrettyCommand() string { + return PrettyCommand(e.Command[0], e.Command[1:]...) +} + +// Cause mimics github.com/pkg/errors's Cause pattern for errors +func (e *RunError) Cause() error { + if e.Inner != nil { + return e.Inner + } + return e +} diff --git a/vendor/sigs.k8s.io/kind/pkg/fs/fs.go b/vendor/sigs.k8s.io/kind/pkg/fs/fs.go new file mode 100644 index 000000000..7fb4eae33 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/fs/fs.go @@ -0,0 +1,156 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fs contains utilities for interacting with the host filesystem +// in a docker friendly way +// TODO(bentheelder): this should be internal +package fs + +import ( + "io" + "os" + "path" + "path/filepath" + "runtime" + "strings" +) + +// TempDir is like os.MkdirTemp, but more docker friendly +func TempDir(dir, prefix string) (name string, err error) { + // create a tempdir as normal + name, err = os.MkdirTemp(dir, prefix) + if err != nil { + return "", err + } + // on macOS $TMPDIR is typically /var/..., which is not mountable + // /private/var/... is the mountable equivalent + if runtime.GOOS == "darwin" && strings.HasPrefix(name, "/var/") { + name = filepath.Join("/private", name) + } + return name, nil +} + +// IsAbs is like filepath.IsAbs but also considering posix absolute paths +// to be absolute even if filepath.IsAbs would not +// This fixes the case of Posix paths on Windows +func IsAbs(hostPath string) bool { + return path.IsAbs(hostPath) || filepath.IsAbs(hostPath) +} + +// Copy recursively directories, symlinks, files copies from src to dst +// Copy will make dirs as necessary, and keep file modes +// Symlinks will be dereferenced similar to `cp -r src dst` +func Copy(src, dst string) error { + // get source info + info, err := os.Lstat(src) + if err != nil { + return err + } + // make sure dest dir exists + if err := os.MkdirAll(filepath.Dir(dst), os.ModePerm); err != nil { + return err + } + // do real copy work + return copy(src, dst, info) +} + +func copy(src, dst string, info os.FileInfo) error { + if info.Mode()&os.ModeSymlink != 0 { + 
return copySymlink(src, dst) + } + if info.IsDir() { + return copyDir(src, dst, info) + } + return copyFile(src, dst, info) +} + +// CopyFile copies a file from src to dst +func CopyFile(src, dst string) (err error) { + // get source information + info, err := os.Stat(src) + if err != nil { + return err + } + return copyFile(src, dst, info) +} + +func copyFile(src, dst string, info os.FileInfo) error { + // open src for reading + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + // create dst file + // this is like f, err := os.Create(dst); os.Chmod(f.Name(), src.Mode()) + out, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, info.Mode()) + if err != nil { + return err + } + // make sure we close the file + defer func() { + closeErr := out.Close() + // if we weren't returning an error + if err == nil { + err = closeErr + } + }() + // actually copy + if _, err = io.Copy(out, in); err != nil { + return err + } + err = out.Sync() + return err +} + +// copySymlink dereferences and then copies a symlink +func copySymlink(src, dst string) error { + // read through the symlink + realSrc, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + info, err := os.Lstat(realSrc) + if err != nil { + return err + } + // copy the underlying contents + return copy(realSrc, dst, info) +} + +func copyDir(src, dst string, info os.FileInfo) error { + // make sure the target dir exists + if err := os.MkdirAll(dst, info.Mode()); err != nil { + return err + } + // copy every source dir entry + entries, err := os.ReadDir(src) + if err != nil { + return err + } + for _, entry := range entries { + entrySrc := filepath.Join(src, entry.Name()) + entryDst := filepath.Join(dst, entry.Name()) + fileInfo, err := entry.Info() + if err != nil { + return err + } + if err := copy(entrySrc, entryDst, fileInfo); err != nil { + return err + } + } + return nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/cluster_util.go 
b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/cluster_util.go new file mode 100644 index 000000000..79d3387fa --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/cluster_util.go @@ -0,0 +1,34 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +// ClusterHasIPv6 returns true if the cluster should have IPv6 enabled due to either +// being IPv6 cluster family or Dual Stack +func ClusterHasIPv6(c *Cluster) bool { + return c.Networking.IPFamily == IPv6Family || c.Networking.IPFamily == DualStackFamily +} + +// ClusterHasImplicitLoadBalancer returns true if this cluster has an implicit api-server LoadBalancer +func ClusterHasImplicitLoadBalancer(c *Cluster) bool { + controlPlanes := 0 + for _, node := range c.Nodes { + if node.Role == ControlPlaneRole { + controlPlanes++ + } + } + return controlPlanes > 1 +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/convert_v1alpha4.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/convert_v1alpha4.go new file mode 100644 index 000000000..2df4b7513 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/convert_v1alpha4.go @@ -0,0 +1,104 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + v1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" +) + +// Convertv1alpha4 converts a v1alpha4 cluster to a cluster at the internal API version +func Convertv1alpha4(in *v1alpha4.Cluster) *Cluster { + in = in.DeepCopy() // deep copy first to avoid touching the original + out := &Cluster{ + Name: in.Name, + Nodes: make([]Node, len(in.Nodes)), + FeatureGates: in.FeatureGates, + RuntimeConfig: in.RuntimeConfig, + KubeadmConfigPatches: in.KubeadmConfigPatches, + KubeadmConfigPatchesJSON6902: make([]PatchJSON6902, len(in.KubeadmConfigPatchesJSON6902)), + ContainerdConfigPatches: in.ContainerdConfigPatches, + ContainerdConfigPatchesJSON6902: in.ContainerdConfigPatchesJSON6902, + } + + for i := range in.Nodes { + convertv1alpha4Node(&in.Nodes[i], &out.Nodes[i]) + } + + convertv1alpha4Networking(&in.Networking, &out.Networking) + + for i := range in.KubeadmConfigPatchesJSON6902 { + convertv1alpha4PatchJSON6902(&in.KubeadmConfigPatchesJSON6902[i], &out.KubeadmConfigPatchesJSON6902[i]) + } + + return out +} + +func convertv1alpha4Node(in *v1alpha4.Node, out *Node) { + out.Role = NodeRole(in.Role) + out.Image = in.Image + + out.Labels = in.Labels + out.KubeadmConfigPatches = in.KubeadmConfigPatches + out.ExtraMounts = make([]Mount, len(in.ExtraMounts)) + out.ExtraPortMappings = make([]PortMapping, len(in.ExtraPortMappings)) + out.KubeadmConfigPatchesJSON6902 = make([]PatchJSON6902, len(in.KubeadmConfigPatchesJSON6902)) + + for i := range in.ExtraMounts { + convertv1alpha4Mount(&in.ExtraMounts[i], &out.ExtraMounts[i]) + } + + for i := range 
in.ExtraPortMappings { + convertv1alpha4PortMapping(&in.ExtraPortMappings[i], &out.ExtraPortMappings[i]) + } + + for i := range in.KubeadmConfigPatchesJSON6902 { + convertv1alpha4PatchJSON6902(&in.KubeadmConfigPatchesJSON6902[i], &out.KubeadmConfigPatchesJSON6902[i]) + } +} + +func convertv1alpha4PatchJSON6902(in *v1alpha4.PatchJSON6902, out *PatchJSON6902) { + out.Group = in.Group + out.Version = in.Version + out.Kind = in.Kind + out.Patch = in.Patch +} + +func convertv1alpha4Networking(in *v1alpha4.Networking, out *Networking) { + out.IPFamily = ClusterIPFamily(in.IPFamily) + out.APIServerPort = in.APIServerPort + out.APIServerAddress = in.APIServerAddress + out.PodSubnet = in.PodSubnet + out.KubeProxyMode = ProxyMode(in.KubeProxyMode) + out.ServiceSubnet = in.ServiceSubnet + out.DisableDefaultCNI = in.DisableDefaultCNI + out.DNSSearch = in.DNSSearch +} + +func convertv1alpha4Mount(in *v1alpha4.Mount, out *Mount) { + out.ContainerPath = in.ContainerPath + out.HostPath = in.HostPath + out.Readonly = in.Readonly + out.SelinuxRelabel = in.SelinuxRelabel + out.Propagation = MountPropagation(in.Propagation) +} + +func convertv1alpha4PortMapping(in *v1alpha4.PortMapping, out *PortMapping) { + out.ContainerPort = in.ContainerPort + out.HostPort = in.HostPort + out.ListenAddress = in.ListenAddress + out.Protocol = PortMappingProtocol(in.Protocol) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/default.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/default.go new file mode 100644 index 000000000..7a93df802 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/default.go @@ -0,0 +1,104 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// this comment makes golint ignore this file, feel free to edit the file. +// Code generated by not-actually-generated-but-go-away-golint. DO NOT EDIT. +// https://github.com/kubernetes/code-generator/issues/30 + +package config + +import ( + "sigs.k8s.io/kind/pkg/apis/config/defaults" + "sigs.k8s.io/kind/pkg/cluster/constants" +) + +// SetDefaultsCluster sets uninitialized fields to their default value. +func SetDefaultsCluster(obj *Cluster) { + // default cluster name + if obj.Name == "" { + obj.Name = constants.DefaultClusterName + } + + // default to a one node cluster + if len(obj.Nodes) == 0 { + obj.Nodes = []Node{ + { + Image: defaults.Image, + Role: ControlPlaneRole, + }, + } + } + + // default nodes + for i := range obj.Nodes { + a := &obj.Nodes[i] + SetDefaultsNode(a) + } + if obj.Networking.IPFamily == "" { + obj.Networking.IPFamily = IPv4Family + } + + // default to listening on 127.0.0.1:randomPort on ipv4 + // and [::1]:randomPort on ipv6 + if obj.Networking.APIServerAddress == "" { + obj.Networking.APIServerAddress = "127.0.0.1" + if obj.Networking.IPFamily == IPv6Family { + obj.Networking.APIServerAddress = "::1" + } + } + + // default the pod CIDR + if obj.Networking.PodSubnet == "" { + obj.Networking.PodSubnet = "10.244.0.0/16" + if obj.Networking.IPFamily == IPv6Family { + // node-mask cidr default is /64 so we need a larger subnet, we use /56 following best practices + // xref: https://www.ripe.net/publications/docs/ripe-690#4--size-of-end-user-prefix-assignment---48---56-or-something-else- + obj.Networking.PodSubnet = "fd00:10:244::/56" + } + 
if obj.Networking.IPFamily == DualStackFamily { + obj.Networking.PodSubnet = "10.244.0.0/16,fd00:10:244::/56" + } + } + + // default the service CIDR using the kubeadm default + // https://github.com/kubernetes/kubernetes/blob/746404f82a28e55e0b76ffa7e40306fb88eb3317/cmd/kubeadm/app/apis/kubeadm/v1beta2/defaults.go#L32 + // Note: kubeadm is using a /12 subnet, that may allocate a 2^20 bitmap in etcd + // we allocate a /16 subnet that allows 65535 services (current Kubernetes tested limit is O(10k) services) + if obj.Networking.ServiceSubnet == "" { + obj.Networking.ServiceSubnet = "10.96.0.0/16" + if obj.Networking.IPFamily == IPv6Family { + obj.Networking.ServiceSubnet = "fd00:10:96::/112" + } + if obj.Networking.IPFamily == DualStackFamily { + obj.Networking.ServiceSubnet = "10.96.0.0/16,fd00:10:96::/112" + } + } + // default the KubeProxyMode using iptables as it's already the default + if obj.Networking.KubeProxyMode == "" { + obj.Networking.KubeProxyMode = IPTablesProxyMode + } +} + +// SetDefaultsNode sets uninitialized fields to their default value. +func SetDefaultsNode(obj *Node) { + if obj.Image == "" { + obj.Image = defaults.Image + } + + if obj.Role == "" { + obj.Role = ControlPlaneRole + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/doc.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/doc.go new file mode 100644 index 000000000..520aac207 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config implements the current apiVersion of the `kind` Config +// along with some common abstractions +// +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=sigs.k8s.io/kind/pkg/internal/apis/config +// +k8s:defaulter-gen=TypeMeta +package config diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/convert.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/convert.go new file mode 100644 index 000000000..eded931b3 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/convert.go @@ -0,0 +1,29 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package encoding + +import ( + "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// V1Alpha4ToInternal converts to the internal API version +func V1Alpha4ToInternal(cluster *v1alpha4.Cluster) *config.Cluster { + v1alpha4.SetDefaultsCluster(cluster) + return config.Convertv1alpha4(cluster) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/doc.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/doc.go new file mode 100644 index 000000000..f6f90a9ef --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package encoding implements utilities for decoding from yaml the `kind` Config +package encoding diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/load.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/load.go new file mode 100644 index 000000000..718d5e46b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/load.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package encoding + +import ( + "bytes" + "os" + + yaml "gopkg.in/yaml.v3" + + "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + "sigs.k8s.io/kind/pkg/errors" + + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// Load reads the file at path and attempts to convert into a `kind` Config; the file +// can be one of the different API versions defined in scheme. 
+// If path == "" then the default config is returned +// If path == "-" then reads from stdin +func Load(path string) (*config.Cluster, error) { + // special case: empty path -> default config + // TODO(bentheelder): consider removing this + if path == "" { + out := &config.Cluster{} + config.SetDefaultsCluster(out) + return out, nil + } + + // read in file + raw, err := os.ReadFile(path) + if err != nil { + return nil, errors.Wrap(err, "error reading file") + } + + return Parse(raw) +} + +// Parse parses a cluster config from raw (yaml) bytes +// It will always return the current internal version after defaulting and +// conversion from the read version +func Parse(raw []byte) (*config.Cluster, error) { + // get kind & apiVersion + tm := typeMeta{} + if err := yaml.Unmarshal(raw, &tm); err != nil { + return nil, errors.Wrap(err, "could not determine kind / apiVersion for config") + } + + // decode specific (apiVersion, kind) + switch tm.APIVersion { + // handle v1alpha4 + case "kind.x-k8s.io/v1alpha4": + if tm.Kind != "Cluster" { + return nil, errors.Errorf("unknown kind %s for apiVersion: %s", tm.Kind, tm.APIVersion) + } + // load version + cfg := &v1alpha4.Cluster{} + if err := yamlUnmarshalStrict(raw, cfg); err != nil { + return nil, errors.Wrap(err, "unable to decode config") + } + // apply defaults for version and convert + return V1Alpha4ToInternal(cfg), nil + } + + // unknown apiVersion if we haven't already returned ... 
+ return nil, errors.Errorf("unknown apiVersion: %s", tm.APIVersion) +} + +// basically metav1.TypeMeta, but with yaml tags +type typeMeta struct { + Kind string `yaml:"kind,omitempty"` + APIVersion string `yaml:"apiVersion,omitempty"` +} + +func yamlUnmarshalStrict(raw []byte, v interface{}) error { + d := yaml.NewDecoder(bytes.NewReader(raw)) + d.KnownFields(true) + return d.Decode(v) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go new file mode 100644 index 000000000..fed300079 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go @@ -0,0 +1,275 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +/* +NOTE: unlike the public types these should not have serialization tags and +should stay 100% internal. These are used to pass around the processed public +config for internal usage. +*/ + +// Cluster contains kind cluster configuration +type Cluster struct { + // The cluster name. 
+ // Optional, this will be overridden by --name / KIND_CLUSTER_NAME + Name string + + // Nodes contains the list of nodes defined in the `kind` Cluster + // If unset this will default to a single control-plane node + // Note that if more than one control plane is specified, an external + // control plane load balancer will be provisioned implicitly + Nodes []Node + + /* Advanced fields */ + + // Networking contains cluster wide network settings + Networking Networking + + // FeatureGates contains a map of Kubernetes feature gates to whether they + // are enabled. The feature gates specified here are passed to all Kubernetes components as flags or in config. + // + // https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/ + FeatureGates map[string]bool + + // RuntimeConfig Keys and values are translated into --runtime-config values for kube-apiserver, separated by commas. + // + // Use this to enable alpha APIs. + RuntimeConfig map[string]string + + // KubeadmConfigPatches are applied to the generated kubeadm config as + // strategic merge patches to `kustomize build` internally + // https://github.com/kubernetes/community/blob/a9cf5c8f3380bb52ebe57b1e2dbdec136d8dd484/contributors/devel/sig-api-machinery/strategic-merge-patch.md + // This should be an inline yaml blob-string + KubeadmConfigPatches []string + + // KubeadmConfigPatchesJSON6902 are applied to the generated kubeadm config + // as patchesJson6902 to `kustomize build` + KubeadmConfigPatchesJSON6902 []PatchJSON6902 + + // ContainerdConfigPatches are applied to every node's containerd config + // in the order listed. + // These should be toml stringsto be applied as merge patches + ContainerdConfigPatches []string + + // ContainerdConfigPatchesJSON6902 are applied to every node's containerd config + // in the order listed. 
+ // These should be YAML or JSON formatting RFC 6902 JSON patches + ContainerdConfigPatchesJSON6902 []string +} + +// Node contains settings for a node in the `kind` Cluster. +// A node in kind config represent a container that will be provisioned with all the components +// required for the assigned role in the Kubernetes cluster +type Node struct { + // Role defines the role of the node in the in the Kubernetes cluster + // created by kind + // + // Defaults to "control-plane" + Role NodeRole + + // Image is the node image to use when creating this node + // If unset a default image will be used, see defaults.Image + Image string + + // Labels are the labels with which the respective node will be labeled + Labels map[string]string + + /* Advanced fields */ + + // ExtraMounts describes additional mount points for the node container + // These may be used to bind a hostPath + ExtraMounts []Mount + + // ExtraPortMappings describes additional port mappings for the node container + // binded to a host Port + ExtraPortMappings []PortMapping + + // KubeadmConfigPatches are applied to the generated kubeadm config as + // strategic merge patches to `kustomize build` internally + // https://github.com/kubernetes/community/blob/a9cf5c8f3380bb52ebe57b1e2dbdec136d8dd484/contributors/devel/sig-api-machinery/strategic-merge-patch.md + // This should be an inline yaml blob-string + KubeadmConfigPatches []string + + // KubeadmConfigPatchesJSON6902 are applied to the generated kubeadm config + // as patchesJson6902 to `kustomize build` + KubeadmConfigPatchesJSON6902 []PatchJSON6902 +} + +// NodeRole defines possible role for nodes in a Kubernetes cluster managed by `kind` +type NodeRole string + +const ( + // ControlPlaneRole identifies a node that hosts a Kubernetes control-plane. + // NOTE: in single node clusters, control-plane nodes act also as a worker + // nodes, in which case the taint will be removed. 
see: + // https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#control-plane-node-isolation + ControlPlaneRole NodeRole = "control-plane" + // WorkerRole identifies a node that hosts a Kubernetes worker + WorkerRole NodeRole = "worker" +) + +// Networking contains cluster wide network settings +type Networking struct { + // IPFamily is the network cluster model, currently it can be ipv4 or ipv6 + IPFamily ClusterIPFamily + // APIServerPort is the listen port on the host for the Kubernetes API Server + // Defaults to a random port on the host obtained by kind + // + // NOTE: if you set the special value of `-1` then the node backend + // (docker, podman...) will be left to pick the port instead. + // This is potentially useful for remote hosts, BUT it means when the container + // is restarted it will be randomized. Leave this unset to allow kind to pick it. + APIServerPort int32 + // APIServerAddress is the listen address on the host for the Kubernetes + // API Server. This should be an IP address. + // + // Defaults to 127.0.0.1 + APIServerAddress string + // PodSubnet is the CIDR used for pod IPs + // kind will select a default if unspecified + PodSubnet string + // ServiceSubnet is the CIDR used for services VIPs + // kind will select a default if unspecified + ServiceSubnet string + // If DisableDefaultCNI is true, kind will not install the default CNI setup. + // Instead the user should install their own CNI after creating the cluster. + DisableDefaultCNI bool + // KubeProxyMode defines if kube-proxy should operate in iptables or ipvs mode + KubeProxyMode ProxyMode + // DNSSearch defines the DNS search domain to use for nodes. If not set, this will be inherited from the host. 
+ DNSSearch *[]string +} + +// ClusterIPFamily defines cluster network IP family +type ClusterIPFamily string + +const ( + // IPv4Family sets ClusterIPFamily to ipv4 + IPv4Family ClusterIPFamily = "ipv4" + // IPv6Family sets ClusterIPFamily to ipv6 + IPv6Family ClusterIPFamily = "ipv6" + // DualStackFamily sets ClusterIPFamily to dual + DualStackFamily ClusterIPFamily = "dual" +) + +// ProxyMode defines a proxy mode for kube-proxy +type ProxyMode string + +const ( + // IPTablesProxyMode sets ProxyMode to iptables + IPTablesProxyMode ProxyMode = "iptables" + // IPVSProxyMode sets ProxyMode to ipvs + IPVSProxyMode ProxyMode = "ipvs" + // NoneProxyMode disables kube-proxy + NoneProxyMode ProxyMode = "none" +) + +// PatchJSON6902 represents an inline kustomize json 6902 patch +// https://tools.ietf.org/html/rfc6902 +type PatchJSON6902 struct { + // these fields specify the patch target resource + Group string + Version string + Kind string + // Patch should contain the contents of the json patch as a string + Patch string +} + +// Mount specifies a host volume to mount into a container. +// This is a close copy of the upstream cri Mount type +// see: k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2 +// It additionally serializes the "propagation" field with the string enum +// names on disk as opposed to the int32 values, and the serialized field names +// have been made closer to core/v1 VolumeMount field names +// In yaml this looks like: +// +// containerPath: /foo +// hostPath: /bar +// readOnly: true +// selinuxRelabel: false +// propagation: None +// +// Propagation may be one of: None, HostToContainer, Bidirectional +type Mount struct { + // Path of the mount within the container. + ContainerPath string + // Path of the mount on the host. If the hostPath doesn't exist, then runtimes + // should report error. If the hostpath is a symbolic link, runtimes should + // follow the symlink and mount the real destination to container. 
+ HostPath string + // If set, the mount is read-only. + Readonly bool + // If set, the mount needs SELinux relabeling. + SelinuxRelabel bool + // Requested propagation mode. + Propagation MountPropagation +} + +// PortMapping specifies a host port mapped into a container port. +// In yaml this looks like: +// +// containerPort: 80 +// hostPort: 8000 +// listenAddress: 127.0.0.1 +// protocol: TCP +type PortMapping struct { + // Port within the container. + ContainerPort int32 + // Port on the host. + // + // If unset, a random port will be selected. + // + // NOTE: if you set the special value of `-1` then the node backend + // (docker, podman...) will be left to pick the port instead. + // This is potentially useful for remote hosts, BUT it means when the container + // is restarted it will be randomized. Leave this unset to allow kind to pick it. + HostPort int32 + // TODO: add protocol (tcp/udp) and port-ranges + ListenAddress string + // Protocol (TCP/UDP/SCTP) + Protocol PortMappingProtocol +} + +// MountPropagation represents an "enum" for mount propagation options, +// see also Mount. +type MountPropagation string + +const ( + // MountPropagationNone specifies that no mount propagation + // ("private" in Linux terminology). + MountPropagationNone MountPropagation = "None" + // MountPropagationHostToContainer specifies that mounts get propagated + // from the host to the container ("rslave" in Linux). + MountPropagationHostToContainer MountPropagation = "HostToContainer" + // MountPropagationBidirectional specifies that mounts get propagated from + // the host to the container and from the container to the host + // ("rshared" in Linux). + MountPropagationBidirectional MountPropagation = "Bidirectional" +) + +// PortMappingProtocol represents an "enum" for port mapping protocol options, +// see also PortMapping. 
+type PortMappingProtocol string + +const ( + // PortMappingProtocolTCP specifies TCP protocol + PortMappingProtocolTCP PortMappingProtocol = "TCP" + // PortMappingProtocolUDP specifies UDP protocol + PortMappingProtocolUDP PortMappingProtocol = "UDP" + // PortMappingProtocolSCTP specifies SCTP protocol + PortMappingProtocolSCTP PortMappingProtocol = "SCTP" +) diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go new file mode 100644 index 000000000..68185d157 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go @@ -0,0 +1,271 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "fmt" + "net" + "regexp" + "strings" + + "sigs.k8s.io/kind/pkg/errors" + "sigs.k8s.io/kind/pkg/internal/sets" +) + +// similar to valid docker container names, but since we will prefix +// and suffix this name, we can relax it a little +// see NewContext() for usage +// https://godoc.org/github.com/docker/docker/daemon/names#pkg-constants +var validNameRE = regexp.MustCompile(`^[a-z0-9.-]+$`) + +// Validate returns a ConfigErrors with an entry for each problem +// with the config, or nil if there are none +func (c *Cluster) Validate() error { + errs := []error{} + + // validate the name + if !validNameRE.MatchString(c.Name) { + errs = append(errs, errors.Errorf("'%s' is not a valid cluster name, cluster names must match `%s`", + c.Name, validNameRE.String())) + } + + // the api server port only needs checking if we aren't picking a random one + // at runtime + if c.Networking.APIServerPort != 0 { + // validate api server listen port + if err := validatePort(c.Networking.APIServerPort); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid apiServerPort")) + } + } + + // podSubnet should be a valid CIDR + if err := validateSubnets(c.Networking.PodSubnet, c.Networking.IPFamily); err != nil { + errs = append(errs, errors.Errorf("invalid pod subnet %v", err)) + } + + // serviceSubnet should be a valid CIDR + if err := validateSubnets(c.Networking.ServiceSubnet, c.Networking.IPFamily); err != nil { + errs = append(errs, errors.Errorf("invalid service subnet %v", err)) + } + + // KubeProxyMode should be iptables or ipvs + if c.Networking.KubeProxyMode != IPTablesProxyMode && c.Networking.KubeProxyMode != IPVSProxyMode && + c.Networking.KubeProxyMode != NoneProxyMode { + errs = append(errs, errors.Errorf("invalid kubeProxyMode: %s", c.Networking.KubeProxyMode)) + } + + // validate nodes + numByRole := make(map[NodeRole]int32) + // All nodes in the config should be valid + for i, n := range c.Nodes { + // validate the node + if 
err := n.Validate(); err != nil { + errs = append(errs, errors.Errorf("invalid configuration for node %d: %v", i, err)) + } + // update role count + if num, ok := numByRole[n.Role]; ok { + numByRole[n.Role] = 1 + num + } else { + numByRole[n.Role] = 1 + } + } + + // there must be at least one control plane node + numControlPlane, anyControlPlane := numByRole[ControlPlaneRole] + if !anyControlPlane || numControlPlane < 1 { + errs = append(errs, errors.Errorf("must have at least one %s node", string(ControlPlaneRole))) + } + + if len(errs) > 0 { + return errors.NewAggregate(errs) + } + return nil +} + +// Validate returns a ConfigErrors with an entry for each problem +// with the Node, or nil if there are none +func (n *Node) Validate() error { + errs := []error{} + + // validate node role should be one of the expected values + switch n.Role { + case ControlPlaneRole, + WorkerRole: + default: + errs = append(errs, errors.Errorf("%q is not a valid node role", n.Role)) + } + + // image should be defined + if n.Image == "" { + errs = append(errs, errors.New("image is a required field")) + } + + // validate extra port forwards + for _, mapping := range n.ExtraPortMappings { + if err := validatePort(mapping.HostPort); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid hostPort")) + } + + if err := validatePort(mapping.ContainerPort); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid containerPort")) + } + } + + if err := validatePortMappings(n.ExtraPortMappings); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid portMapping")) + } + + if len(errs) > 0 { + return errors.NewAggregate(errs) + } + + return nil +} + +func validatePortMappings(portMappings []PortMapping) error { + errMsg := "port mapping with same listen address, port and protocol already configured" + + wildcardAddrIPv4 := net.ParseIP("0.0.0.0") + wildcardAddrIPv6 := net.ParseIP("::") + + // bindMap has the following key-value structure + // PORT/PROTOCOL: [ IP ] + // { 
80/TCP: [ 127.0.0.1, 192.168.2.3 ], 80/UDP: [ 0.0.0.0 ] } + bindMap := make(map[string]sets.String) + + formatPortProtocol := func(port int32, protocol PortMappingProtocol) string { + return fmt.Sprintf("%d/%s", port, protocol) + } + + for _, portMapping := range portMappings { + addr := net.ParseIP(portMapping.ListenAddress) + addrString := addr.String() + + portProtocol := formatPortProtocol(portMapping.HostPort, portMapping.Protocol) + possibleErr := fmt.Errorf("%s: %s:%s", errMsg, addrString, portProtocol) + + // in golang 0.0.0.0 and [::] are equivalent, convert [::] -> 0.0.0.0 + // https://github.com/golang/go/issues/48723 + if addr.Equal(wildcardAddrIPv6) { + addr = wildcardAddrIPv4 + addrString = addr.String() + } + + if _, ok := bindMap[portProtocol]; ok { + + // wildcard address case: + // return error if there already exists any listen address for same port and protocol + if addr.Equal(wildcardAddrIPv4) { + if bindMap[portProtocol].Len() > 0 { + return possibleErr + } + } + + // direct duplicate & wild card present check: + // return error if same combination of ip, port and protocol already exists in bindMap. + // return error if wildcard address is already present for same port & protocol + if bindMap[portProtocol].Has(addrString) || bindMap[portProtocol].Has(wildcardAddrIPv4.String()) { + return possibleErr + } + } else { + // initialize the set + bindMap[portProtocol] = sets.NewString() + } + + // add the entry to bindMap + bindMap[portProtocol].Insert(addrString) + } + return nil +} + +func validatePort(port int32) error { + // NOTE: -1 is a special value for auto-selecting the port in the container + // backend where possible as opposed to in kind itself. 
+ if port < -1 || port > 65535 { + return errors.Errorf("invalid port number: %d", port) + } + return nil +} + +func validateSubnets(subnetStr string, ipFamily ClusterIPFamily) error { + allErrs := []error{} + + cidrsString := strings.Split(subnetStr, ",") + subnets := make([]*net.IPNet, 0, len(cidrsString)) + for _, cidrString := range cidrsString { + _, cidr, err := net.ParseCIDR(cidrString) + if err != nil { + return fmt.Errorf("failed to parse cidr value:%q with error: %v", cidrString, err) + } + subnets = append(subnets, cidr) + } + + dualstack := ipFamily == DualStackFamily + switch { + // if no subnets are defined + case len(subnets) == 0: + allErrs = append(allErrs, errors.New("no subnets defined")) + // if DualStack only 2 CIDRs allowed + case dualstack && len(subnets) > 2: + allErrs = append(allErrs, errors.New("expected one (IPv4 or IPv6) CIDR or two CIDRs from each family for dual-stack networking")) + // if DualStack and there are 2 CIDRs validate if there is at least one of each IP family + case dualstack && len(subnets) == 2: + areDualStackCIDRs, err := isDualStackCIDRs(subnets) + if err != nil { + allErrs = append(allErrs, err) + } else if !areDualStackCIDRs { + allErrs = append(allErrs, errors.New("expected one (IPv4 or IPv6) CIDR or two CIDRs from each family for dual-stack networking")) + } + // if not DualStack only one CIDR allowed + case !dualstack && len(subnets) > 1: + allErrs = append(allErrs, errors.New("only one CIDR allowed for single-stack networking")) + case ipFamily == IPv4Family && subnets[0].IP.To4() == nil: + allErrs = append(allErrs, errors.New("expected IPv4 CIDR for IPv4 family")) + case ipFamily == IPv6Family && subnets[0].IP.To4() != nil: + allErrs = append(allErrs, errors.New("expected IPv6 CIDR for IPv6 family")) + } + + if len(allErrs) > 0 { + return errors.NewAggregate(allErrs) + } + return nil +} + +// isDualStackCIDRs returns if +// - all are valid cidrs +// - at least one cidr from each family (v4 or v6) +func 
isDualStackCIDRs(cidrs []*net.IPNet) (bool, error) { + v4Found := false + v6Found := false + for _, cidr := range cidrs { + if cidr == nil { + return false, fmt.Errorf("cidr %v is invalid", cidr) + } + + if v4Found && v6Found { + continue + } + + if cidr.IP != nil && cidr.IP.To4() == nil { + v6Found = true + continue + } + v4Found = true + } + + return v4Found && v6Found, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/zz_generated.deepcopy.go new file mode 100644 index 000000000..6c86691fc --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/zz_generated.deepcopy.go @@ -0,0 +1,196 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Networking.DeepCopyInto(&out.Networking) + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RuntimeConfig != nil { + in, out := &in.RuntimeConfig, &out.RuntimeConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.KubeadmConfigPatches != nil { + in, out := &in.KubeadmConfigPatches, &out.KubeadmConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatchesJSON6902 != nil { + in, out := &in.KubeadmConfigPatchesJSON6902, &out.KubeadmConfigPatchesJSON6902 + *out = make([]PatchJSON6902, len(*in)) + copy(*out, *in) + } + if in.ContainerdConfigPatches != nil { + in, out := &in.ContainerdConfigPatches, &out.ContainerdConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ContainerdConfigPatchesJSON6902 != nil { + in, out := &in.ContainerdConfigPatchesJSON6902, &out.ContainerdConfigPatchesJSON6902 + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mount) DeepCopyInto(out *Mount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount. 
+func (in *Mount) DeepCopy() *Mount { + if in == nil { + return nil + } + out := new(Mount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in + if in.DNSSearch != nil { + in, out := &in.DNSSearch, &out.DNSSearch + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. +func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = make([]Mount, len(*in)) + copy(*out, *in) + } + if in.ExtraPortMappings != nil { + in, out := &in.ExtraPortMappings, &out.ExtraPortMappings + *out = make([]PortMapping, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatches != nil { + in, out := &in.KubeadmConfigPatches, &out.KubeadmConfigPatches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KubeadmConfigPatchesJSON6902 != nil { + in, out := &in.KubeadmConfigPatchesJSON6902, &out.KubeadmConfigPatchesJSON6902 + *out = make([]PatchJSON6902, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. 
+func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchJSON6902) DeepCopyInto(out *PatchJSON6902) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchJSON6902. +func (in *PatchJSON6902) DeepCopy() *PatchJSON6902 { + if in == nil { + return nil + } + out := new(PatchJSON6902) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortMapping) DeepCopyInto(out *PortMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMapping. +func (in *PortMapping) DeepCopy() *PortMapping { + if in == nil { + return nil + } + out := new(PortMapping) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/cli/logger.go b/vendor/sigs.k8s.io/kind/pkg/internal/cli/logger.go new file mode 100644 index 000000000..73eff7a1d --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/cli/logger.go @@ -0,0 +1,255 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cli + +import ( + "bytes" + "fmt" + "io" + "runtime" + "strings" + "sync" + "sync/atomic" + + "sigs.k8s.io/kind/pkg/log" + + "sigs.k8s.io/kind/pkg/internal/env" +) + +// Logger is the kind cli's log.Logger implementation +type Logger struct { + writer io.Writer + writerMu sync.Mutex + verbosity log.Level + bufferPool *bufferPool + // kind special additions + isSmartWriter bool +} + +var _ log.Logger = &Logger{} + +// NewLogger returns a new Logger with the given verbosity +func NewLogger(writer io.Writer, verbosity log.Level) *Logger { + l := &Logger{ + verbosity: verbosity, + bufferPool: newBufferPool(), + } + l.SetWriter(writer) + return l +} + +// SetWriter sets the output writer +func (l *Logger) SetWriter(w io.Writer) { + l.writerMu.Lock() + defer l.writerMu.Unlock() + l.writer = w + _, isSpinner := w.(*Spinner) + l.isSmartWriter = isSpinner || env.IsSmartTerminal(w) +} + +// ColorEnabled returns true if the caller is OK to write colored output +func (l *Logger) ColorEnabled() bool { + l.writerMu.Lock() + defer l.writerMu.Unlock() + return l.isSmartWriter +} + +func (l *Logger) getVerbosity() log.Level { + return log.Level(atomic.LoadInt32((*int32)(&l.verbosity))) +} + +// SetVerbosity sets the loggers verbosity +func (l *Logger) SetVerbosity(verbosity log.Level) { + atomic.StoreInt32((*int32)(&l.verbosity), int32(verbosity)) +} + +// synchronized write to the inner writer +func (l *Logger) write(p []byte) (n int, err error) { + l.writerMu.Lock() + defer l.writerMu.Unlock() + return l.writer.Write(p) +} + +// writeBuffer writes buf with write, ensuring there is a trailing newline +func (l *Logger) writeBuffer(buf *bytes.Buffer) { + // ensure trailing newline + if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + // TODO: should we handle this somehow?? + // Who logs for the logger? 
🤔 + _, _ = l.write(buf.Bytes()) +} + +// print writes a simple string to the log writer +func (l *Logger) print(message string) { + buf := bytes.NewBufferString(message) + l.writeBuffer(buf) +} + +// printf is roughly fmt.Fprintf against the log writer +func (l *Logger) printf(format string, args ...interface{}) { + buf := l.bufferPool.Get() + fmt.Fprintf(buf, format, args...) + l.writeBuffer(buf) + l.bufferPool.Put(buf) +} + +// addDebugHeader inserts the debug line header to buf +func addDebugHeader(buf *bytes.Buffer) { + _, file, line, ok := runtime.Caller(3) + // lifted from klog + if !ok { + file = "???" + line = 1 + } else { + if slash := strings.LastIndex(file, "/"); slash >= 0 { + path := file + file = path[slash+1:] + if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { + file = path[dirsep+1:] + } + } + } + buf.Grow(len(file) + 11) // we know at least this many bytes are needed + buf.WriteString("DEBUG: ") + buf.WriteString(file) + buf.WriteByte(':') + fmt.Fprintf(buf, "%d", line) + buf.WriteByte(']') + buf.WriteByte(' ') +} + +// debug is like print but with a debug log header +func (l *Logger) debug(message string) { + buf := l.bufferPool.Get() + addDebugHeader(buf) + buf.WriteString(message) + l.writeBuffer(buf) + l.bufferPool.Put(buf) +} + +// debugf is like printf but with a debug log header +func (l *Logger) debugf(format string, args ...interface{}) { + buf := l.bufferPool.Get() + addDebugHeader(buf) + fmt.Fprintf(buf, format, args...) + l.writeBuffer(buf) + l.bufferPool.Put(buf) +} + +// Warn is part of the log.Logger interface +func (l *Logger) Warn(message string) { + l.print(message) +} + +// Warnf is part of the log.Logger interface +func (l *Logger) Warnf(format string, args ...interface{}) { + l.printf(format, args...) 
+} + +// Error is part of the log.Logger interface +func (l *Logger) Error(message string) { + l.print(message) +} + +// Errorf is part of the log.Logger interface +func (l *Logger) Errorf(format string, args ...interface{}) { + l.printf(format, args...) +} + +// V is part of the log.Logger interface +func (l *Logger) V(level log.Level) log.InfoLogger { + return infoLogger{ + logger: l, + level: level, + enabled: level <= l.getVerbosity(), + } +} + +// infoLogger implements log.InfoLogger for Logger +type infoLogger struct { + logger *Logger + level log.Level + enabled bool +} + +// Enabled is part of the log.InfoLogger interface +func (i infoLogger) Enabled() bool { + return i.enabled +} + +// Info is part of the log.InfoLogger interface +func (i infoLogger) Info(message string) { + if !i.enabled { + return + } + // for > 0, we are writing debug messages, include extra info + if i.level > 0 { + i.logger.debug(message) + } else { + i.logger.print(message) + } +} + +// Infof is part of the log.InfoLogger interface +func (i infoLogger) Infof(format string, args ...interface{}) { + if !i.enabled { + return + } + // for > 0, we are writing debug messages, include extra info + if i.level > 0 { + i.logger.debugf(format, args...) + } else { + i.logger.printf(format, args...) 
+ } +} + +// bufferPool is a type safe sync.Pool of *byte.Buffer, guaranteed to be Reset +type bufferPool struct { + sync.Pool +} + +// newBufferPool returns a new bufferPool +func newBufferPool() *bufferPool { + return &bufferPool{ + sync.Pool{ + New: func() interface{} { + // The Pool's New function should generally only return pointer + // types, since a pointer can be put into the return interface + // value without an allocation: + return new(bytes.Buffer) + }, + }, + } +} + +// Get obtains a buffer from the pool +func (b *bufferPool) Get() *bytes.Buffer { + return b.Pool.Get().(*bytes.Buffer) +} + +// Put returns a buffer to the pool, resetting it first +func (b *bufferPool) Put(x *bytes.Buffer) { + // only store small buffers to avoid pointless allocation + // avoid keeping arbitrarily large buffers + if x.Len() > 256 { + return + } + x.Reset() + b.Pool.Put(x) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/cli/override.go b/vendor/sigs.k8s.io/kind/pkg/internal/cli/override.go new file mode 100644 index 000000000..695106522 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/cli/override.go @@ -0,0 +1,18 @@ +package cli + +import ( + "os" + + "github.com/spf13/pflag" +) + +// OverrideDefaultName conditionally allows overriding the default cluster name +// by setting the KIND_CLUSTER_NAME environment variable +// only if --name wasn't set explicitly +func OverrideDefaultName(fs *pflag.FlagSet) { + if !fs.Changed("name") { + if name := os.Getenv("KIND_CLUSTER_NAME"); name != "" { + _ = fs.Set("name", name) + } + } +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/cli/spinner.go b/vendor/sigs.k8s.io/kind/pkg/internal/cli/spinner.go new file mode 100644 index 000000000..2af86a250 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/cli/spinner.go @@ -0,0 +1,168 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "fmt" + "io" + "runtime" + "sync" + "time" +) + +// custom CLI loading spinner for kind +var spinnerFrames = []string{ + "⠈⠁", + "⠈⠑", + "⠈⠱", + "⠈⡱", + "⢀⡱", + "⢄⡱", + "⢄⡱", + "⢆⡱", + "⢎⡱", + "⢎⡰", + "⢎⡠", + "⢎⡀", + "⢎⠁", + "⠎⠁", + "⠊⠁", +} + +// Spinner is a simple and efficient CLI loading spinner used by kind +// It is simplistic and assumes that the line length will not change. +type Spinner struct { + stop chan struct{} // signals writer goroutine to stop from Stop() + stopped chan struct{} // signals Stop() that the writer goroutine stopped + mu *sync.Mutex // protects the mutable bits + // below are protected by mu + running bool + writer io.Writer + ticker *time.Ticker // signals that it is time to write a frame + prefix string + suffix string + // format string used to write a frame, depends on the host OS / terminal + frameFormat string +} + +// spinner implements writer +var _ io.Writer = &Spinner{} + +// NewSpinner initializes and returns a new Spinner that will write to w +// NOTE: w should be os.Stderr or similar, and it should be a Terminal +func NewSpinner(w io.Writer) *Spinner { + frameFormat := "\x1b[?7l\r%s%s%s\x1b[?7h" + // toggling wrapping seems to behave poorly on windows + // in general only the simplest escape codes behave well at the moment, + // and only in newer shells + if runtime.GOOS == "windows" { + frameFormat = "\r%s%s%s" + } + return &Spinner{ + stop: make(chan struct{}, 1), + stopped: make(chan struct{}), + mu: &sync.Mutex{}, + writer: w, + frameFormat: frameFormat, + } +} + +// SetPrefix sets the 
prefix to print before the spinner +func (s *Spinner) SetPrefix(prefix string) { + s.mu.Lock() + defer s.mu.Unlock() + s.prefix = prefix +} + +// SetSuffix sets the suffix to print after the spinner +func (s *Spinner) SetSuffix(suffix string) { + s.mu.Lock() + defer s.mu.Unlock() + s.suffix = suffix +} + +// Start starts the spinner running +func (s *Spinner) Start() { + s.mu.Lock() + defer s.mu.Unlock() + // don't start if we've already started + if s.running { + return + } + // flag that we've started + s.running = true + // start / create a frame ticker + s.ticker = time.NewTicker(time.Millisecond * 100) + // spin in the background + go func() { + // write frames forever (until signaled to stop) + for { + for _, frame := range spinnerFrames { + select { + // prefer stopping, select this signal first + case <-s.stop: + func() { + s.mu.Lock() + defer s.mu.Unlock() + s.ticker.Stop() // free up the ticker + s.running = false // mark as stopped (it's fine to start now) + s.stopped <- struct{}{} // tell Stop() that we're done + }() + return // ... 
and stop + // otherwise continue and write one frame + case <-s.ticker.C: + func() { + s.mu.Lock() + defer s.mu.Unlock() + fmt.Fprintf(s.writer, s.frameFormat, s.prefix, frame, s.suffix) + }() + } + } + } + }() +} + +// Stop signals the spinner to stop +func (s *Spinner) Stop() { + s.mu.Lock() + if !s.running { + s.mu.Unlock() + return + } + // try to stop, do nothing if channel is full (IE already busy stopping) + s.stop <- struct{}{} + s.mu.Unlock() + // wait for stop to be finished + <-s.stopped +} + +// Write implements io.Writer, interrupting the spinner and writing to +// the inner writer +func (s *Spinner) Write(p []byte) (n int, err error) { + // lock first, so nothing else can start writing until we are done + s.mu.Lock() + defer s.mu.Unlock() + // it the spinner is not running, just write directly + if !s.running { + return s.writer.Write(p) + } + // otherwise: we will rewrite the line first + if _, err := s.writer.Write([]byte("\r")); err != nil { + return 0, err + } + return s.writer.Write(p) +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/cli/status.go b/vendor/sigs.k8s.io/kind/pkg/internal/cli/status.go new file mode 100644 index 000000000..d7ef7eb7a --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/cli/status.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cli + +import ( + "fmt" + + "sigs.k8s.io/kind/pkg/log" +) + +// Status is used to track ongoing status in a CLI, with a nice loading spinner +// when attached to a terminal +type Status struct { + spinner *Spinner + status string + logger log.Logger + // for controlling coloring etc + successFormat string + failureFormat string +} + +// StatusForLogger returns a new status object for the logger l, +// if l is the kind cli logger and the writer is a Spinner, that spinner +// will be used for the status +func StatusForLogger(l log.Logger) *Status { + s := &Status{ + logger: l, + successFormat: " ✓ %s\n", + failureFormat: " ✗ %s\n", + } + // if we're using the CLI logger, check for if it has a spinner setup + // and wire the status to that + if v, ok := l.(*Logger); ok { + if v2, ok := v.writer.(*Spinner); ok { + s.spinner = v2 + // use colored success / failure messages + s.successFormat = " \x1b[32m✓\x1b[0m %s\n" + s.failureFormat = " \x1b[31m✗\x1b[0m %s\n" + } + } + return s +} + +// Start starts a new phase of the status, if attached to a terminal +// there will be a loading spinner with this status +func (s *Status) Start(status string) { + s.End(true) + // set new status + s.status = status + if s.spinner != nil { + s.spinner.SetSuffix(fmt.Sprintf(" %s ", s.status)) + s.spinner.Start() + } else { + s.logger.V(0).Infof(" • %s ...\n", s.status) + } +} + +// End completes the current status, ending any previous spinning and +// marking the status as success or failure +func (s *Status) End(success bool) { + if s.status == "" { + return + } + + if s.spinner != nil { + s.spinner.Stop() + fmt.Fprint(s.spinner.writer, "\r") + } + if success { + s.logger.V(0).Infof(s.successFormat, s.status) + } else { + s.logger.V(0).Infof(s.failureFormat, s.status) + } + + s.status = "" +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/env/term.go b/vendor/sigs.k8s.io/kind/pkg/internal/env/term.go new file mode 100644 index 000000000..5f809dd9f --- /dev/null +++ 
b/vendor/sigs.k8s.io/kind/pkg/internal/env/term.go @@ -0,0 +1,107 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package env + +import ( + "io" + "os" + "runtime" + + isatty "github.com/mattn/go-isatty" +) + +// a fake TTY type for testing that can only be implemented within this package +type isTestFakeTTY interface { + isTestFakeTTY() +} + +// IsTerminal returns true if the writer w is a terminal +func IsTerminal(w io.Writer) bool { + // check for internal fake type we can use for testing. + if _, ok := (w).(isTestFakeTTY); ok { + return true + } + // check for real terminals + if v, ok := (w).(*os.File); ok { + return isatty.IsTerminal(v.Fd()) + } + return false +} + +// IsSmartTerminal returns true if the writer w is a terminal AND +// we think that the terminal is smart enough to use VT escape codes etc. 
+func IsSmartTerminal(w io.Writer) bool { + return isSmartTerminal(w, runtime.GOOS, os.LookupEnv) +} + +func isSmartTerminal(w io.Writer, GOOS string, lookupEnv func(string) (string, bool)) bool { + // Not smart if it's not a tty + if !IsTerminal(w) { + return false + } + + // getenv helper for when we only care about the value + getenv := func(e string) string { + v, _ := lookupEnv(e) + return v + } + + // Explicit request for no ANSI escape codes + // https://no-color.org/ + if _, set := lookupEnv("NO_COLOR"); set { + return false + } + + // Explicitly dumb terminals are not smart + // https://en.wikipedia.org/wiki/Computer_terminal#Dumb_terminals + term := getenv("TERM") + if term == "dumb" { + return false + } + // st has some bug 🤷‍♂️ + // https://github.com/kubernetes-sigs/kind/issues/1892 + if term == "st-256color" { + return false + } + + // On Windows WT_SESSION is set by the modern terminal component. + // Older terminals have poor support for UTF-8, VT escape codes, etc. + if GOOS == "windows" && getenv("WT_SESSION") == "" { + return false + } + + /* CI Systems with bad Fake TTYs */ + // Travis CI + // https://github.com/kubernetes-sigs/kind/issues/1478 + // We can detect it with documented magical environment variables + // https://docs.travis-ci.com/user/environment-variables/#default-environment-variables + if getenv("HAS_JOSH_K_SEAL_OF_APPROVAL") == "true" && getenv("TRAVIS") == "true" { + return false + } + + // OK, we'll assume it's smart now, given no evidence otherwise. 
+ return true +} + +// trivial fake TTY writer for testing +type testFakeTTY struct{} + +func (t *testFakeTTY) Write(p []byte) (int, error) { + return len(p), nil +} + +func (t *testFakeTTY) isTestFakeTTY() {} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/doc.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/doc.go new file mode 100644 index 000000000..d24d9ce39 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/patch/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package patch contains helpers for applying patches +package patch diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/json6902patch.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/json6902patch.go new file mode 100644 index 000000000..e140c0e4b --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/patch/json6902patch.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package patch + +import ( + jsonpatch "github.com/evanphx/json-patch/v5" + + "sigs.k8s.io/yaml" + + "sigs.k8s.io/kind/pkg/errors" + + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +type json6902Patch struct { + raw string // raw original contents + patch jsonpatch.Patch // processed JSON 6902 patch + matchInfo matchInfo // used to match resources +} + +func convertJSON6902Patches(patchesJSON6902 []config.PatchJSON6902) ([]json6902Patch, error) { + patches := []json6902Patch{} + for _, configPatch := range patchesJSON6902 { + patchJSON, err := yaml.YAMLToJSON([]byte(configPatch.Patch)) + if err != nil { + return nil, errors.WithStack(err) + } + patch, err := jsonpatch.DecodePatch(patchJSON) + if err != nil { + return nil, errors.WithStack(err) + } + patches = append(patches, json6902Patch{ + raw: configPatch.Patch, + patch: patch, + matchInfo: matchInfoForConfigJSON6902Patch(configPatch), + }) + } + return patches, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/kubeyaml.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/kubeyaml.go new file mode 100644 index 000000000..9bdd1997f --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/patch/kubeyaml.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package patch + +import ( + "strings" + + "sigs.k8s.io/kind/pkg/errors" + + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// KubeYAML takes a Kubernetes object YAML document stream to patch, +// merge patches, and JSON 6902 patches. +// +// It returns a patched a YAML document stream. +// +// Matching is performed on Kubernetes style v1 TypeMeta fields +// (kind and apiVersion), between the YAML documents and the patches. +// +// Patches match if their kind and apiVersion match a document, with the exception +// that if the patch does not set apiVersion it will be ignored. +func KubeYAML(toPatch string, patches []string, patches6902 []config.PatchJSON6902) (string, error) { + // pre-process, including splitting up documents etc. + resources, err := parseResources(toPatch) + if err != nil { + return "", errors.Wrap(err, "failed to parse yaml to patch") + } + mergePatches, err := parseMergePatches(patches) + if err != nil { + return "", errors.Wrap(err, "failed to parse patches") + } + json6902patches, err := convertJSON6902Patches(patches6902) + if err != nil { + return "", errors.Wrap(err, "failed to parse JSON 6902 patches") + } + // apply patches and build result + builder := &strings.Builder{} + for i, r := range resources { + // apply merge patches + for _, p := range mergePatches { + if _, err := r.applyMergePatch(p); err != nil { + return "", errors.Wrap(err, "failed to apply patch") + } + } + // apply RFC 6902 JSON patches + for _, p := range json6902patches { + if _, err := r.apply6902Patch(p); err != nil { + return "", errors.Wrap(err, "failed to apply JSON 6902 patch") + } + } + // write out result + if err := r.encodeTo(builder); err != nil { + return "", errors.Wrap(err, "failed to write patched resource") + } + // write document separator + if i+1 < len(resources) { + if _, err := builder.WriteString("---\n"); err != nil { + return "", errors.Wrap(err, "failed to write document separator") + } + } + } + // verify that all patches were used + 
return builder.String(), nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/matchinfo.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/matchinfo.go new file mode 100644 index 000000000..7674ea5fb --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/patch/matchinfo.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patch + +import ( + "sigs.k8s.io/yaml" + + "sigs.k8s.io/kind/pkg/errors" + + "sigs.k8s.io/kind/pkg/internal/apis/config" +) + +// we match resources and patches on their v1 TypeMeta +type matchInfo struct { + Kind string `json:"kind,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` +} + +func parseYAMLMatchInfo(raw string) (matchInfo, error) { + m := matchInfo{} + if err := yaml.Unmarshal([]byte(raw), &m); err != nil { + return matchInfo{}, errors.Wrapf(err, "failed to parse type meta for %q", raw) + } + return m, nil +} + +func matchInfoForConfigJSON6902Patch(patch config.PatchJSON6902) matchInfo { + return matchInfo{ + Kind: patch.Kind, + APIVersion: groupVersionToAPIVersion(patch.Group, patch.Version), + } +} + +func groupVersionToAPIVersion(group, version string) string { + if group == "" { + return version + } + return group + "/" + version +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/mergepatch.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/mergepatch.go new file mode 100644 index 000000000..c52b9f55e --- /dev/null +++ 
b/vendor/sigs.k8s.io/kind/pkg/internal/patch/mergepatch.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patch + +import ( + "sigs.k8s.io/yaml" + + "sigs.k8s.io/kind/pkg/errors" +) + +type mergePatch struct { + raw string // the original raw data + json []byte // the processed data (in JSON form) + matchInfo matchInfo // for matching resources +} + +func parseMergePatches(rawPatches []string) ([]mergePatch, error) { + patches := []mergePatch{} + // split document streams before trying to parse them + splitRawPatches := make([]string, 0, len(rawPatches)) + for _, raw := range rawPatches { + splitRaw, err := splitYAMLDocuments(raw) + if err != nil { + return nil, err + } + splitRawPatches = append(splitRawPatches, splitRaw...) + } + for _, raw := range splitRawPatches { + matchInfo, err := parseYAMLMatchInfo(raw) + if err != nil { + return nil, errors.WithStack(err) + } + json, err := yaml.YAMLToJSON([]byte(raw)) + if err != nil { + return nil, errors.WithStack(err) + } + patches = append(patches, mergePatch{ + raw: raw, + json: json, + matchInfo: matchInfo, + }) + } + return patches, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/resource.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/resource.go new file mode 100644 index 000000000..17f898ec9 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/patch/resource.go @@ -0,0 +1,148 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patch + +import ( + "bufio" + "bytes" + "io" + "strings" + + jsonpatch "github.com/evanphx/json-patch/v5" + + "sigs.k8s.io/yaml" + + "sigs.k8s.io/kind/pkg/errors" +) + +type resource struct { + raw string // the original raw data + json []byte // the processed data (in JSON form), may be mutated + matchInfo matchInfo // for matching patches +} + +func (r *resource) apply6902Patch(patch json6902Patch) (matches bool, err error) { + if !r.matches(patch.matchInfo) { + return false, nil + } + patched, err := patch.patch.Apply(r.json) + if err != nil { + return true, errors.WithStack(err) + } + r.json = patched + return true, nil +} + +func (r *resource) applyMergePatch(patch mergePatch) (matches bool, err error) { + if !r.matches(patch.matchInfo) { + return false, nil + } + patched, err := jsonpatch.MergePatch(r.json, patch.json) + if err != nil { + return true, errors.WithStack(err) + } + r.json = patched + return true, nil +} + +func (r resource) matches(o matchInfo) bool { + m := &r.matchInfo + // we require kind to match, but if the patch does not specify + // APIVersion we ignore it (eg to allow trivial patches across kubeadm versions) + return m.Kind == o.Kind && (o.APIVersion == "" || m.APIVersion == o.APIVersion) +} + +func (r *resource) encodeTo(w io.Writer) error { + encoded, err := yaml.JSONToYAML(r.json) + if err != nil { + return errors.WithStack(err) + } + if _, err := w.Write(encoded); err != nil { + return 
errors.WithStack(err) + } + return nil +} + +func parseResources(yamlDocumentStream string) ([]resource, error) { + resources := []resource{} + documents, err := splitYAMLDocuments(yamlDocumentStream) + if err != nil { + return nil, err + } + for _, raw := range documents { + matchInfo, err := parseYAMLMatchInfo(raw) + if err != nil { + return nil, errors.WithStack(err) + } + json, err := yaml.YAMLToJSON([]byte(raw)) + if err != nil { + return nil, errors.WithStack(err) + } + resources = append(resources, resource{ + raw: raw, + json: json, + matchInfo: matchInfo, + }) + } + return resources, nil +} + +func splitYAMLDocuments(yamlDocumentStream string) ([]string, error) { + documents := []string{} + scanner := bufio.NewScanner(strings.NewReader(yamlDocumentStream)) + scanner.Split(splitYAMLDocument) + for scanner.Scan() { + documents = append(documents, scanner.Text()) + } + if err := scanner.Err(); err != nil { + return nil, errors.Wrap(err, "error splitting documents") + } + return documents, nil +} + +const yamlSeparator = "\n---" + +// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents. +// this is borrowed from k8s.io/apimachinery/pkg/util/yaml/decoder.go +func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + sep := len([]byte(yamlSeparator)) + if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 { + // We have a potential document terminator + i += sep + after := data[i:] + if len(after) == 0 { + // we can't read any more characters + if atEOF { + return len(data), data[:len(data)-sep], nil + } + return 0, nil, nil + } + if j := bytes.IndexByte(after, '\n'); j >= 0 { + return i + j + 1, data[0 : i-sep], nil + } + return 0, nil, nil + } + // If we're at EOF, we have a final, non-terminated line. Return it. + if atEOF { + return len(data), data, nil + } + // Request more data. 
+ return 0, nil, nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/patch/toml.go b/vendor/sigs.k8s.io/kind/pkg/internal/patch/toml.go new file mode 100644 index 000000000..5923977a6 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/patch/toml.go @@ -0,0 +1,100 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patch + +import ( + "bytes" + "encoding/json" + + burntoml "github.com/BurntSushi/toml" + jsonpatch "github.com/evanphx/json-patch/v5" + toml "github.com/pelletier/go-toml" + yaml "gopkg.in/yaml.v3" + + "sigs.k8s.io/kind/pkg/errors" +) + +// TOML patches toPatch with the patches (should be TOML merge patches) and patches6902 (should be JSON 6902 patches) +func TOML(toPatch string, patches []string, patches6902 []string) (string, error) { + // convert to JSON for patching + j, err := tomlToJSON([]byte(toPatch)) + if err != nil { + return "", err + } + // apply merge patches + for _, patch := range patches { + pj, err := tomlToJSON([]byte(patch)) + if err != nil { + return "", err + } + patched, err := jsonpatch.MergePatch(j, pj) + if err != nil { + return "", errors.WithStack(err) + } + j = patched + } + // apply JSON 6902 patches + for _, patch6902 := range patches6902 { + patch, err := jsonpatch.DecodePatch([]byte(patch6902)) + if err != nil { + return "", errors.WithStack(err) + } + patched, err := patch.Apply(j) + if err != nil { + return "", errors.WithStack(err) + } + j = patched + } + // convert result 
back to TOML + return jsonToTOMLString(j) +} + +// tomlToJSON converts arbitrary TOML to JSON +func tomlToJSON(t []byte) ([]byte, error) { + // we use github.com.pelletier/go-toml here to unmarshal arbitrary TOML to JSON + tree, err := toml.LoadBytes(t) + if err != nil { + return nil, errors.WithStack(err) + } + b, err := json.Marshal(tree.ToMap()) + if err != nil { + return nil, errors.WithStack(err) + } + return b, nil +} + +// jsonToTOMLString converts arbitrary JSON to TOML +func jsonToTOMLString(j []byte) (string, error) { + var unstruct interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + if err := yaml.Unmarshal(j, &unstruct); err != nil { + return "", errors.WithStack(err) + } + // we use github.com/BurntSushi/toml here because github.com.pelletier/go-toml + // can only marshal structs AND BurntSushi/toml is what contained uses + // and has more canonically formatted output (we initially plan to use + // this package for patching containerd config) + var buff bytes.Buffer + if err := burntoml.NewEncoder(&buff).Encode(unstruct); err != nil { + return "", errors.WithStack(err) + } + return buff.String(), nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/sets/doc.go b/vendor/sigs.k8s.io/kind/pkg/internal/sets/doc.go new file mode 100644 index 000000000..70f70a926 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/sets/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package sets implements set types. +// +// This is forked from k8s.io/apimachinery/pkg/util/sets (under the same project +// and license), because k8s.io/apimachinery is a relatively heavy dependency +// and we only need some trivial utilities. Avoiding importing k8s.io/apimachinery +// makes kind easier to embed in other projects for testing etc. +// +// The set implementation is relatively small and very stable. +package sets diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/sets/empty.go b/vendor/sigs.k8s.io/kind/pkg/internal/sets/empty.go new file mode 100644 index 000000000..e11e622c5 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/sets/empty.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +// Empty is public since it is used by some internal API objects for conversions between external +// string arrays and internal sets, and conversion logic requires public types today. 
+type Empty struct{} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/sets/string.go b/vendor/sigs.k8s.io/kind/pkg/internal/sets/string.go new file mode 100644 index 000000000..e6f37db88 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/sets/string.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +type String map[string]Empty + +// NewString creates a String from a list of values. +func NewString(items ...string) String { + ss := String{} + ss.Insert(items...) + return ss +} + +// StringKeySet creates a String from a keys of a map[string](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func StringKeySet(theMap interface{}) String { + v := reflect.ValueOf(theMap) + ret := String{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(string)) + } + return ret +} + +// Insert adds items to the set. +func (s String) Insert(items ...string) String { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s String) Delete(items ...string) String { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. 
+func (s String) Has(item string) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s String) HasAll(items ...string) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s String) HasAny(items ...string) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s String) Difference(s2 String) String { + result := NewString() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 String) Union(s2 String) String { + result := NewString() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 String) Intersection(s2 String) String { + var walk, other String + result := NewString() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 String) IsSuperset(s2 String) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. 
+// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 String) Equal(s2 String) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfString []string + +func (s sortableSliceOfString) Len() int { return len(s) } +func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } +func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted string slice. +func (s String) List() []string { + res := make(sortableSliceOfString, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []string(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s String) UnsortedList() []string { + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s String) PopAny() (string, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue string + return zeroValue, false +} + +// Len returns the size of the set. +func (s String) Len() int { + return len(s) +} + +func lessString(lhs, rhs string) bool { + return lhs < rhs +} diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/version/doc.go b/vendor/sigs.k8s.io/kind/pkg/internal/version/doc.go new file mode 100644 index 000000000..0bb753f51 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/version/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version provides utilities for version number comparisons +// +// This is forked from k8s.io/apimachinery/pkg/util/version to make +// kind easier to import (k8s.io/apimachinery/pkg/util/version is a stable, +// mature package with no externaldependencies within a large, heavy module) +package version diff --git a/vendor/sigs.k8s.io/kind/pkg/internal/version/version.go b/vendor/sigs.k8s.io/kind/pkg/internal/version/version.go new file mode 100644 index 000000000..8c997ec45 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/internal/version/version.go @@ -0,0 +1,325 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package version + +import ( + "bytes" + "fmt" + "regexp" + "strconv" + "strings" +) + +// Version is an opaque representation of a version number +type Version struct { + components []uint + semver bool + preRelease string + buildMetadata string +} + +var ( + // versionMatchRE splits a version string into numeric and "extra" parts + versionMatchRE = regexp.MustCompile(`^\s*v?([0-9]+(?:\.[0-9]+)*)(.*)*$`) + // extraMatchRE splits the "extra" part of versionMatchRE into semver pre-release and build metadata; it does not validate the "no leading zeroes" constraint for pre-release + extraMatchRE = regexp.MustCompile(`^(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?\s*$`) +) + +func parse(str string, semver bool) (*Version, error) { + parts := versionMatchRE.FindStringSubmatch(str) + if parts == nil { + return nil, fmt.Errorf("could not parse %q as version", str) + } + numbers, extra := parts[1], parts[2] + + components := strings.Split(numbers, ".") + if (semver && len(components) != 3) || (!semver && len(components) < 2) { + return nil, fmt.Errorf("illegal version string %q", str) + } + + v := &Version{ + components: make([]uint, len(components)), + semver: semver, + } + for i, comp := range components { + if (i == 0 || semver) && strings.HasPrefix(comp, "0") && comp != "0" { + return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) + } + num, err := strconv.ParseUint(comp, 10, 0) + if err != nil { + return nil, fmt.Errorf("illegal non-numeric version component %q in %q: %v", comp, str, err) + } + v.components[i] = uint(num) + } + + if semver && extra != "" { + extraParts := extraMatchRE.FindStringSubmatch(extra) + if extraParts == nil { + return nil, fmt.Errorf("could not parse pre-release/metadata (%s) in version %q", extra, str) + } + v.preRelease, v.buildMetadata = extraParts[1], extraParts[2] + + for _, comp := range strings.Split(v.preRelease, ".") { + if _, err := strconv.ParseUint(comp, 
10, 0); err == nil { + if strings.HasPrefix(comp, "0") && comp != "0" { + return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) + } + } + } + } + + return v, nil +} + +// ParseGeneric parses a "generic" version string. The version string must consist of two +// or more dot-separated numeric fields (the first of which can't have leading zeroes), +// followed by arbitrary uninterpreted data (which need not be separated from the final +// numeric field by punctuation). For convenience, leading and trailing whitespace is +// ignored, and the version can be preceded by the letter "v". See also ParseSemantic. +func ParseGeneric(str string) (*Version, error) { + return parse(str, false) +} + +// MustParseGeneric is like ParseGeneric except that it panics on error +func MustParseGeneric(str string) *Version { + v, err := ParseGeneric(str) + if err != nil { + panic(err) + } + return v +} + +// ParseSemantic parses a version string that exactly obeys the syntax and semantics of +// the "Semantic Versioning" specification (http://semver.org/) (although it ignores +// leading and trailing whitespace, and allows the version to be preceded by "v"). For +// version strings that are not guaranteed to obey the Semantic Versioning syntax, use +// ParseGeneric. 
+func ParseSemantic(str string) (*Version, error) { + return parse(str, true) +} + +// MustParseSemantic is like ParseSemantic except that it panics on error +func MustParseSemantic(str string) *Version { + v, err := ParseSemantic(str) + if err != nil { + panic(err) + } + return v +} + +// Major returns the major release number +func (v *Version) Major() uint { + return v.components[0] +} + +// Minor returns the minor release number +func (v *Version) Minor() uint { + return v.components[1] +} + +// Patch returns the patch release number if v is a Semantic Version, or 0 +func (v *Version) Patch() uint { + if len(v.components) < 3 { + return 0 + } + return v.components[2] +} + +// BuildMetadata returns the build metadata, if v is a Semantic Version, or "" +func (v *Version) BuildMetadata() string { + return v.buildMetadata +} + +// PreRelease returns the prerelease metadata, if v is a Semantic Version, or "" +func (v *Version) PreRelease() string { + return v.preRelease +} + +// Components returns the version number components +func (v *Version) Components() []uint { + return v.components +} + +// WithMajor returns copy of the version object with requested major number +func (v *Version) WithMajor(major uint) *Version { + result := *v + result.components = []uint{major, v.Minor(), v.Patch()} + return &result +} + +// WithMinor returns copy of the version object with requested minor number +func (v *Version) WithMinor(minor uint) *Version { + result := *v + result.components = []uint{v.Major(), minor, v.Patch()} + return &result +} + +// WithPatch returns copy of the version object with requested patch number +func (v *Version) WithPatch(patch uint) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), patch} + return &result +} + +// WithPreRelease returns copy of the version object with requested prerelease +func (v *Version) WithPreRelease(preRelease string) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), 
v.Patch()} + result.preRelease = preRelease + return &result +} + +// WithBuildMetadata returns copy of the version object with requested buildMetadata +func (v *Version) WithBuildMetadata(buildMetadata string) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), v.Patch()} + result.buildMetadata = buildMetadata + return &result +} + +// String converts a Version back to a string; note that for versions parsed with +// ParseGeneric, this will not include the trailing uninterpreted portion of the version +// number. +func (v *Version) String() string { + if v == nil { + return "" + } + var buffer bytes.Buffer + + for i, comp := range v.components { + if i > 0 { + buffer.WriteString(".") + } + buffer.WriteString(fmt.Sprintf("%d", comp)) + } + if v.preRelease != "" { + buffer.WriteString("-") + buffer.WriteString(v.preRelease) + } + if v.buildMetadata != "" { + buffer.WriteString("+") + buffer.WriteString(v.buildMetadata) + } + + return buffer.String() +} + +// compareInternal returns -1 if v is less than other, 1 if it is greater than other, or 0 +// if they are equal +func (v *Version) compareInternal(other *Version) int { + + vLen := len(v.components) + oLen := len(other.components) + for i := 0; i < vLen && i < oLen; i++ { + switch { + case other.components[i] < v.components[i]: + return 1 + case other.components[i] > v.components[i]: + return -1 + } + } + + // If components are common but one has more items and they are not zeros, it is bigger + switch { + case oLen < vLen && !onlyZeros(v.components[oLen:]): + return 1 + case oLen > vLen && !onlyZeros(other.components[vLen:]): + return -1 + } + + if !v.semver || !other.semver { + return 0 + } + + switch { + case v.preRelease == "" && other.preRelease != "": + return 1 + case v.preRelease != "" && other.preRelease == "": + return -1 + case v.preRelease == other.preRelease: // includes case where both are "" + return 0 + } + + vPR := strings.Split(v.preRelease, ".") + oPR := 
strings.Split(other.preRelease, ".") + for i := 0; i < len(vPR) && i < len(oPR); i++ { + vNum, err := strconv.ParseUint(vPR[i], 10, 0) + if err == nil { + oNum, err := strconv.ParseUint(oPR[i], 10, 0) + if err == nil { + switch { + case oNum < vNum: + return 1 + case oNum > vNum: + return -1 + default: + continue + } + } + } + if oPR[i] < vPR[i] { + return 1 + } else if oPR[i] > vPR[i] { + return -1 + } + } + + switch { + case len(oPR) < len(vPR): + return 1 + case len(oPR) > len(vPR): + return -1 + } + + return 0 +} + +// returns false if array contain any non-zero element +func onlyZeros(array []uint) bool { + for _, num := range array { + if num != 0 { + return false + } + } + return true +} + +// AtLeast tests if a version is at least equal to a given minimum version. If both +// Versions are Semantic Versions, this will use the Semantic Version comparison +// algorithm. Otherwise, it will compare only the numeric components, with non-present +// components being considered "0" (ie, "1.4" is equal to "1.4.0"). +func (v *Version) AtLeast(min *Version) bool { + return v.compareInternal(min) != -1 +} + +// LessThan tests if a version is less than a given version. (It is exactly the opposite +// of AtLeast, for situations where asking "is v too old?" makes more sense than asking +// "is v new enough?".) +func (v *Version) LessThan(other *Version) bool { + return v.compareInternal(other) == -1 +} + +// Compare compares v against a version string (which will be parsed as either Semantic +// or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if +// it is greater than other, or 0 if they are equal. 
+func (v *Version) Compare(other string) (int, error) { + ov, err := parse(other, v.semver) + if err != nil { + return 0, err + } + return v.compareInternal(ov), nil +} diff --git a/vendor/sigs.k8s.io/kind/pkg/log/doc.go b/vendor/sigs.k8s.io/kind/pkg/log/doc.go new file mode 100644 index 000000000..ffda513b9 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/log/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package log defines a logging interface that kind uses +// This is roughly a minimal subset of klog github.com/kubernetes/klog +package log diff --git a/vendor/sigs.k8s.io/kind/pkg/log/noop.go b/vendor/sigs.k8s.io/kind/pkg/log/noop.go new file mode 100644 index 000000000..f5a20ee69 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/log/noop.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package log + +// NoopLogger implements the Logger interface and never logs anything +type NoopLogger struct{} + +// Warn meets the Logger interface but does nothing +func (n NoopLogger) Warn(message string) {} + +// Warnf meets the Logger interface but does nothing +func (n NoopLogger) Warnf(format string, args ...interface{}) {} + +// Error meets the Logger interface but does nothing +func (n NoopLogger) Error(message string) {} + +// Errorf meets the Logger interface but does nothing +func (n NoopLogger) Errorf(format string, args ...interface{}) {} + +// V meets the Logger interface but does nothing +func (n NoopLogger) V(level Level) InfoLogger { return NoopInfoLogger{} } + +// NoopInfoLogger implements the InfoLogger interface and never logs anything +type NoopInfoLogger struct{} + +// Enabled meets the InfoLogger interface but always returns false +func (n NoopInfoLogger) Enabled() bool { return false } + +// Info meets the InfoLogger interface but does nothing +func (n NoopInfoLogger) Info(message string) {} + +// Infof meets the InfoLogger interface but does nothing +func (n NoopInfoLogger) Infof(format string, args ...interface{}) {} diff --git a/vendor/sigs.k8s.io/kind/pkg/log/types.go b/vendor/sigs.k8s.io/kind/pkg/log/types.go new file mode 100644 index 000000000..875eabdf4 --- /dev/null +++ b/vendor/sigs.k8s.io/kind/pkg/log/types.go @@ -0,0 +1,66 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package log + +// Level is a verbosity logging level for Info logs +// See also https://github.com/kubernetes/klog +type Level int32 + +// Logger defines the logging interface kind uses +// It is roughly a subset of github.com/kubernetes/klog +type Logger interface { + // Warn should be used to write user facing warnings + Warn(message string) + // Warnf should be used to write Printf style user facing warnings + Warnf(format string, args ...interface{}) + // Error may be used to write an error message when it occurs + // Prefer returning an error instead in most cases + Error(message string) + // Errorf may be used to write a Printf style error message when it occurs + // Prefer returning an error instead in most cases + Errorf(format string, args ...interface{}) + // V() returns an InfoLogger for a given verbosity Level + // + // Normal verbosity levels: + // V(0): normal user facing messages go to V(0) + // V(1): debug messages start when V(N > 0), these should be high level + // V(2): more detailed log messages + // V(3+): trace level logging, in increasing "noisiness" ... allowing + // arbitrarily detailed logging at extremely low cost unless the + // logger has actually been configured to display these (E.G. via the -v + // command line flag) + // + // It is expected that the returned InfoLogger will be extremely cheap + // to interact with for a Level greater than the enabled level + V(Level) InfoLogger +} + +// InfoLogger defines the info logging interface kind uses +// It is roughly a subset of Verbose from github.com/kubernetes/klog +type InfoLogger interface { + // Info is used to write a user facing status message + // + // See: Logger.V + Info(message string) + // Infof is used to write a Printf style user facing status message + Infof(format string, args ...interface{}) + // Enabled should return true if this verbosity level is enabled + // on the Logger + // + // See: Logger.V + Enabled() bool +}