diff --git a/.kitchen.yml b/.kitchen.yml index c8a72788f3..62beeda102 100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -78,19 +78,6 @@ suites: systems: - name: stub_domains_upstream_nameservers backend: local - - name: "workload_identity" - transport: - root_module_directory: test/fixtures/workload_identity - verifier: - systems: - - name: gcloud - backend: local - controls: - - gcloud - - name: gcp - backend: gcp - controls: - - gcp - name: "workload_metadata_config" transport: root_module_directory: test/fixtures/workload_metadata_config @@ -98,20 +85,3 @@ suites: systems: - name: workload_metadata_config backend: local - - name: "node_pool" - transport: - root_module_directory: test/fixtures/node_pool - verifier: - systems: - - name: node_pool - backend: local - controls: - - gcloud - - kubectl - - name: "safer_cluster_iap_bastion" - transport: - root_module_directory: test/fixtures/safer_cluster_iap_bastion - verifier: - systems: - - name: safer_cluster_iap_bastion - backend: local diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index ce23f00b96..086ec1992a 100644 --- a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -309,17 +309,17 @@ steps: waitFor: - create-all name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge node-pool-local'] + args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage apply --verbose'] - id: verify node-pool-local waitFor: - converge node-pool-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify node-pool-local'] + args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage verify --verbose'] - id: destroy node-pool-local waitFor: - verify node-pool-local name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy node-pool-local'] + args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage teardown --verbose'] - id: apply sandbox-enabled-local waitFor: - create-all @@ -339,32 +339,32 @@ steps: waitFor: - create-all name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge workload-identity-local'] + args: ['/bin/bash', '-c', 'cft test run TestWorkloadIdentity --stage apply --verbose'] - id: verify workload-identity-local waitFor: - converge workload-identity-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify workload-identity-local'] + args: ['/bin/bash', '-c', 'cft test run TestWorkloadIdentity --stage verify --verbose'] - id: destroy workload-identity-local waitFor: - verify workload-identity-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy workload-identity-local'] + args: ['/bin/bash', '-c', 'cft test run TestWorkloadIdentity --stage teardown --verbose'] - id: converge safer-cluster-iap-bastion-local waitFor: - create-all name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge safer-cluster-iap-bastion-local'] + args: ['/bin/bash', '-c', 'cft test run TestSaferClusterIapBastion --stage apply --verbose'] - id: verify safer-cluster-iap-bastion-local waitFor: - converge 
safer-cluster-iap-bastion-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify safer-cluster-iap-bastion-local'] + args: ['/bin/bash', '-c', 'cft test run TestSaferClusterIapBastion --stage verify --verbose'] - id: destroy safer-cluster-iap-bastion-local waitFor: - verify safer-cluster-iap-bastion-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy safer-cluster-iap-bastion-local'] + args: ['/bin/bash', '-c', 'cft test run TestSaferClusterIapBastion --stage teardown --verbose'] - id: apply simple-zonal-with-asm-local waitFor: - create-all diff --git a/examples/node_pool/main.tf b/examples/node_pool/main.tf index 0efa052976..3713311de6 100644 --- a/examples/node_pool/main.tf +++ b/examples/node_pool/main.tf @@ -43,6 +43,7 @@ module "gke" { disable_legacy_metadata_endpoints = false cluster_autoscaling = var.cluster_autoscaling deletion_protection = false + service_account = "default" node_pools = [ { diff --git a/examples/workload_identity/main.tf b/examples/workload_identity/main.tf index 405cb5c4e9..abe7edecf8 100644 --- a/examples/workload_identity/main.tf +++ b/examples/workload_identity/main.tf @@ -1,5 +1,5 @@ /** - * Copyright 2018 Google LLC + * Copyright 2018-2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -24,6 +24,10 @@ provider "kubernetes" { host = "https://${module.gke.endpoint}" token = data.google_client_config.default.access_token cluster_ca_certificate = base64decode(module.gke.ca_certificate) + + ignore_annotations = [ + "^iam.gke.io\\/.*" + ] } module "gke" { diff --git a/test/fixtures/safer_cluster_iap_bastion/example.tf b/test/fixtures/safer_cluster_iap_bastion/example.tf index b4ea3d7650..767e10eac1 100644 --- a/test/fixtures/safer_cluster_iap_bastion/example.tf +++ b/test/fixtures/safer_cluster_iap_bastion/example.tf @@ -1,5 +1,5 @@ /** - * Copyright 2020 Google LLC + * Copyright 2020-2024 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/test/integration/node_pool/controls/gcloud.rb b/test/integration/node_pool/controls/gcloud.rb deleted file mode 100644 index bd2e756b0b..0000000000 --- a/test/integration/node_pool/controls/gcloud.rb +++ /dev/null @@ -1,566 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -project_id = attribute('project_id') -location = attribute('location') -cluster_name = attribute('cluster_name') - -expected_accelerators_count = "1" -expected_accelerators_type = "nvidia-tesla-p4" - -control "gcloud" do - title "Google Compute Engine GKE configuration" - describe command("gcloud beta --project=#{project_id} container clusters --zone=#{location} describe #{cluster_name} --format=json") do - its(:exit_status) { should eq 0 } - its(:stderr) { should eq '' } - - let!(:data) do - if subject.exit_status == 0 - JSON.parse(subject.stdout) - else - {} - end - end - - describe "cluster-autoscaling" do - it "has the expected cluster autoscaling settings" do - expect(data['autoscaling']).to include({ - "autoprovisioningNodePoolDefaults" => including({ - "imageType"=>"COS_CONTAINERD", - "oauthScopes" => %w(https://www.googleapis.com/auth/cloud-platform), - "serviceAccount" => "default" - }), - "autoscalingProfile" => "OPTIMIZE_UTILIZATION", - "enableNodeAutoprovisioning" => true, - "resourceLimits" => [ - { - "maximum" => "20", - "minimum" => "5", - "resourceType" => "cpu" - }, - { - "maximum" => "30", - "minimum" => "10", - "resourceType" => "memory" - } - ] - }) - end - end - - describe "node pools" do - let(:node_pools) { data['nodePools'].reject { |p| p['name'] == "default-pool" || p['name'] =~ %r{^nap-.*} } } - - it "has 5" do - expect(node_pools.count).to eq 5 - end - - describe "pool-01" do - it "exists" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - ) - ) - end - - it "is the expected machine type" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "config" => including( - "machineType" => "e2-medium", - ), - ) - ) - end - - it "has the expected image type" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "config" => including( - "imageType" => "COS_CONTAINERD", - ), - ) - ) - end - - it "has autoscaling enabled" do - expect(data['nodePools']).to include( 
- including( - "name" => "pool-01", - "autoscaling" => including( - "enabled" => true, - ), - ) - ) - end - - it "has the expected minimum node count" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "autoscaling" => including( - "minNodeCount" => 1, - ), - ) - ) - end - - it "has autorepair enabled" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "management" => including( - "autoRepair" => true, - ), - ) - ) - end - - it "has automatic upgrades enabled" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "management" => including( - "autoUpgrade" => true, - ), - ) - ) - end - - it "has the expected metadata" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "config" => including( - "metadata" => including( - "shutdown-script" => "kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data \"$HOSTNAME\"", - "disable-legacy-endpoints" => "false", - ), - ), - ) - ) - end - - it "has the expected labels" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "config" => including( - "labels" => { - "all-pools-example" => "true", - "pool-01-example" => "true", - "cluster_name" => cluster_name, - "node_pool" => "pool-01", - }, - ), - ) - ) - end - - it "has the expected network tags" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "config" => including( - "tags" => match_array([ - "all-node-example", - "pool-01-example", - "gke-#{cluster_name}", - "gke-#{cluster_name}-pool-01", - ]), - ), - ) - ) - end - - it "has the expected linux node config sysctls" do - expect(data['nodePools']).to include( - including( - "name" => "pool-01", - "config" => including( - "linuxNodeConfig" => including( - "sysctls" => including( - "net.core.netdev_max_backlog" => "10000", - "net.core.rmem_max" => "10000" - ) - ) - ) - ) - ) - end - end - - describe 
"pool-02" do - it "exists" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - ) - ) - end - - it "is the expected machine type" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "machineType" => "n1-standard-2", - ), - ) - ) - end - - it "has autoscaling enabled" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "autoscaling" => including( - "enabled" => true, - ), - ) - ) - end - - it "has the expected minimum node count" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "autoscaling" => including( - "minNodeCount" => 1, - ), - ) - ) - end - - it "has the expected maximum node count" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "autoscaling" => including( - "maxNodeCount" => 2, - ), - ) - ) - end - -# TODO: Update/fix this test (manually tested) -# it "has the expected accelerators" do -# expect(data['nodePools']).to include( -# including( -# "name" => "pool-02", -# "config" => including( -# "accelerators" => [{"acceleratorCount" => expected_accelerators_count, -# "acceleratorType" => expected_accelerators_type}], -# ), -# ) -# ) -# end - - it "has the expected disk size" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "diskSizeGb" => 30, - ), - ) - ) - end - - it "has the expected disk type" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "diskType" => "pd-standard", - ), - ) - ) - end - - it "has the expected image type" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "imageType" => "COS_CONTAINERD", - ), - ) - ) - end - - it "has the expected labels" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "labels" => including( - "all-pools-example" => "true", 
- "cluster_name" => cluster_name, - "node_pool" => "pool-02", - ) - ), - ) - ) - end - - it "has the expected network tags" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "tags" => match_array([ - "all-node-example", - "gke-#{cluster_name}", - "gke-#{cluster_name}-pool-02", - ]) - ), - ) - ) - end - - it "has the expected linux node config sysctls" do - expect(data['nodePools']).to include( - including( - "name" => "pool-02", - "config" => including( - "linuxNodeConfig" => including( - "sysctls" => including( - "net.core.netdev_max_backlog" => "10000" - ) - ) - ) - ) - ) - end - end - - describe "pool-03" do - it "exists" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - ) - ) - end - - it "is the expected machine type" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "config" => including( - "machineType" => "n1-standard-2", - ), - ) - ) - end - - it "has autoscaling disabled" do - expect(data['nodePools']).not_to include( - including( - "name" => "pool-03", - "autoscaling" => including( - "enabled" => true, - ), - ) - ) - end - - it "has the expected node count" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "initialNodeCount" => 2 - ) - ) - end - - it "has autorepair enabled" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "management" => including( - "autoRepair" => true, - ), - ) - ) - end - - it "has automatic upgrades enabled" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "management" => including( - "autoUpgrade" => true, - ), - ) - ) - end - - it "has the expected labels" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "config" => including( - "labels" => { - "all-pools-example" => "true", - "cluster_name" => cluster_name, - "node_pool" => "pool-03", - "sandbox.gke.io/runtime"=>"gvisor" - }, - ), - ) - ) 
- end - - it "has the expected network tags" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "config" => including( - "tags" => match_array([ - "all-node-example", - "gke-#{cluster_name}", - "gke-#{cluster_name}-pool-03", - ]), - ), - ) - ) - end - - it "has the expected pod range" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "networkConfig" => including( - "podIpv4CidrBlock" => "172.16.0.0/18", - "podRange" => "test" - ) - ) - ) - end - - it "has the expected image" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "config" => including( - "imageType" => "COS_CONTAINERD", - ), - ) - ) - end - - it "has the expected kubelet config" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "config" => including( - "kubeletConfig" => including( - "cpuManagerPolicy" => "static", - "cpuCfsQuota" => true - ) - ) - ) - ) - end - - it "has the expected linux node config sysctls" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "config" => including( - "linuxNodeConfig" => including( - "sysctls" => including( - "net.core.netdev_max_backlog" => "20000" - ) - ) - ) - ) - ) - end - end - - describe "pool-04" do - it "exists" do - expect(data['nodePools']).to include( - including( - "name" => "pool-04", - ) - ) - end - - it "has queued_provisioning enabled" do - expect(data['nodePools']).not_to include( - including( - "name" => "pool-04", - "queued_provisioning" => including( - "enabled" => true, - ), - ) - ) - end - end - - describe "pool-05" do - it "exists" do - expect(data['nodePools']).to include( - including( - "name" => "pool-05", - ) - ) - end - - it "has enable_nested_virtualization enabled" do - expect(data['nodePools']).not_to include( - including( - "name" => "pool-05", - "advanced_machine_features" => including( - "enable_nested_virtualization" => true, - ), - ) - ) - end - end - end - end - - describe 
command("gcloud beta --project=#{project_id} container clusters --zone=#{location} describe #{cluster_name} --format=json") do - its(:exit_status) { should eq 0 } - its(:stderr) { should eq '' } - - let!(:data) do - if subject.exit_status == 0 - JSON.parse(subject.stdout) - else - {} - end - end - - it "pool-03 has nodes in correct locations" do - expect(data['nodePools']).to include( - including( - "name" => "pool-03", - "locations" => match_array([ - "#{location}-b", - "#{location}-c", - ]), - ) - ) - end - end -end diff --git a/test/integration/node_pool/controls/kubectl.rb b/test/integration/node_pool/controls/kubectl.rb deleted file mode 100644 index 811ebcda0f..0000000000 --- a/test/integration/node_pool/controls/kubectl.rb +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -require 'kubeclient' -require 'rest-client' - -require 'base64' - -kubernetes_endpoint = attribute('kubernetes_endpoint') -client_token = attribute('client_token') -ca_certificate = attribute('ca_certificate') - -control "kubectl" do - title "Kubernetes configuration" - - describe "kubernetes" do - let(:kubernetes_http_endpoint) { "https://#{kubernetes_endpoint}/api" } - let(:client) do - cert_store = OpenSSL::X509::Store.new - cert_store.add_cert(OpenSSL::X509::Certificate.new(Base64.decode64(ca_certificate))) - Kubeclient::Client.new( - kubernetes_http_endpoint, - "v1", - ssl_options: { - cert_store: cert_store, - verify_ssl: OpenSSL::SSL::VERIFY_PEER, - }, - auth_options: { - bearer_token: Base64.decode64(client_token), - }, - ) - end - - describe "nodes" do - let(:all_nodes) { client.get_nodes } - let(:taints) { nodes.first.spec.taints.map { |t| t.to_h.select { |k, v| [:effect, :key, :value].include?(k.to_sym) } } } - - describe "pool-01" do - let(:nodes) do - all_nodes.select { |n| n.metadata.labels.node_pool == "pool-01" } - end - - it "has the expected taints" do - expect(taints).to eq([ - { - effect: "PreferNoSchedule", - key: "all-pools-example", - value: "true", - }, - { - effect: "PreferNoSchedule", - key: "pool-01-example", - value: "true", - }, - ]) - end - end - - describe "pool-02" do - let(:nodes) do - all_nodes.select { |n| n.metadata.labels.node_pool == "pool-02" } - end - - it "has the expected taints" do - expect(taints).to include( - { - effect: "PreferNoSchedule", - key: "all-pools-example", - value: "true", - } - ) - end - end - describe "pool-03" do - let(:nodes) do - all_nodes.select { |n| n.metadata.labels.node_pool == "pool-03" } - end - - it "has the expected taints" do - expect(taints).to include( - { - effect: "PreferNoSchedule", - key: "all-pools-example", - value: "true", - } - ) - end - end - end - end -end diff --git a/test/integration/node_pool/inspec.yml b/test/integration/node_pool/inspec.yml deleted file mode 100644 index 
b915e7d119..0000000000 --- a/test/integration/node_pool/inspec.yml +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: node_pool -attributes: - - name: project_id - required: true - type: string - - name: location - required: true - type: string - - name: cluster_name - required: true - type: string - - name: kubernetes_endpoint - required: true - type: string - - name: client_token - required: true - type: string - - name: ca_certificate - required: true - type: string diff --git a/test/integration/node_pool/node_pool_test.go b/test/integration/node_pool/node_pool_test.go new file mode 100644 index 0000000000..9aae6f5143 --- /dev/null +++ b/test/integration/node_pool/node_pool_test.go @@ -0,0 +1,165 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package node_pool + +import ( + "fmt" + "testing" + "time" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/stretchr/testify/assert" + "github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/testutils" + gkeutils "github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/utils" +) + +func TestNodePool(t *testing.T) { + bpt := tft.NewTFBlueprintTest(t, + tft.WithRetryableTerraformErrors(testutils.RetryableTransientErrors, 3, 2*time.Minute), + ) + + bpt.DefineVerify(func(assert *assert.Assertions) { + // Skipping Default Verify as the Verify Stage fails due to change in Client Cert Token + // bpt.DefaultVerify(assert) + gkeutils.TGKEVerify(t, bpt, assert) // Verify Resources + + projectId := bpt.GetStringOutput("project_id") + location := bpt.GetStringOutput("location") + clusterName := bpt.GetStringOutput("cluster_name") + + //cluster := gcloud.Runf(t, "container clusters describe %s --zone %s --project %s", clusterName, location, projectId) + clusterResourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", projectId, location, clusterName) + cluster := gkeutils.GetProjectResources(t, projectId, gkeutils.WithAssetType("container.googleapis.com/Cluster")).Get("#(name=\"" + clusterResourceName + "\").resource.data") + + // Cluster + assert.Contains([]string{"RUNNING", "RECONCILING"}, cluster.Get("status").String(), "Cluster is Running") + assert.Equal("COS_CONTAINERD", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.imageType").String(), "has the expected image type") + assert.Equal("[\n \"https://www.googleapis.com/auth/cloud-platform\"\n ]", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.oauthScopes").String(), "has the expected oauth scopes") + 
assert.Equal("default", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.serviceAccount").String(), "has the expected service account") + assert.Equal("OPTIMIZE_UTILIZATION", cluster.Get("autoscaling.autoscalingProfile").String(), "has the expected autoscaling profile") + assert.True(cluster.Get("autoscaling.enableNodeAutoprovisioning").Bool(), "has the expected node autoprovisioning") + assert.JSONEq(`[ + { + "maximum": "20", + "minimum": "5", + "resourceType": "cpu" + }, + { + "maximum": "30", + "minimum": "10", + "resourceType": "memory" + } + ]`, + cluster.Get("autoscaling.resourceLimits").String(), "has the expected resource limits") + + // Pool-01 + assert.Equal("pool-01", cluster.Get("nodePools.#(name==\"pool-01\").name").String(), "pool-1 exists") + assert.Equal("e2-medium", cluster.Get("nodePools.#(name==\"pool-01\").config.machineType").String(), "is the expected machine type") + assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-01\").config.imageType").String(), "has the expected image") + assert.True(cluster.Get("nodePools.#(name==\"pool-01\").autoscaling.enabled").Bool(), "has autoscaling enabled") + assert.Equal(int64(1), cluster.Get("nodePools.#(name==\"pool-01\").autoscaling.minNodeCount").Int(), "has the expected minimum node count") + assert.True(cluster.Get("nodePools.#(name==\"pool-01\").management.autoRepair").Bool(), "has autorepair enabled") + assert.True(cluster.Get("nodePools.#(name==\"pool-01\").management.autoUpgrade").Bool(), "has automatic upgrades enabled") + assert.Equal("kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data \"$HOSTNAME\"", cluster.Get("nodePools.#(name==\"pool-01\").config.metadata.shutdown-script").String(), "has the expected shutdown-script metadata") + assert.Equal("false", cluster.Get("nodePools.#(name==\"pool-01\").config.metadata.disable-legacy-endpoints").String(), "has disable-legacy-endpoints metadata set to false") + assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", 
"pool-01-example": "true", "cluster_name": "%s", "node_pool": "pool-01"}`, clusterName), + cluster.Get("nodePools.#(name==\"pool-01\").config.labels").String(), "has the expected labels") + assert.ElementsMatch([]string{"all-node-example", "pool-01-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-01", clusterName)}, + cluster.Get("nodePools.#(name==\"pool-01\").config.tags").Value().([]interface{}), "has the expected network tags") + assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config net.core.netdev_max_backlog sysctl") + assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.rmem_max").Int(), "has the expected linux node config net.core.rmem_max sysctl") + + // Pool-02 + assert.Equal("pool-02", cluster.Get("nodePools.#(name==\"pool-02\").name").String(), "pool-2 exists") + assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-02\").config.machineType").String(), "is the expected machine type") + assert.True(cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.enabled").Bool(), "has autoscaling enabled") + assert.Equal(int64(1), cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.minNodeCount").Int(), "has the expected minimum node count") + assert.Equal(int64(2), cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.maxNodeCount").Int(), "has the expected maximum node count") + assert.Equal(int64(30), cluster.Get("nodePools.#(name==\"pool-02\").config.diskSizeGb").Int(), "has the expected disk size") + assert.Equal("pd-standard", cluster.Get("nodePools.#(name==\"pool-02\").config.diskType").String(), "has the expected disk type") + assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-02\").config.imageType").String(), "has the expected image") + assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "cluster_name": 
"%s", "node_pool": "pool-02"}`, clusterName), + cluster.Get("nodePools.#(name==\"pool-02\").config.labels").String(), "has the expected labels") + assert.ElementsMatch([]string{"all-node-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-02", clusterName)}, + cluster.Get("nodePools.#(name==\"pool-02\").config.tags").Value().([]interface{}), "has the expected network tags") + assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-02\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls") + + // Pool-03 + assert.Equal("pool-03", cluster.Get("nodePools.#(name==\"pool-03\").name").String(), "pool-3 exists") + assert.JSONEq(fmt.Sprintf(`["%s-b", "%s-c"]`, location, location), cluster.Get("nodePools.#(name==\"pool-03\").locations").String(), "has nodes in correct locations") + assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-03\").config.machineType").String(), "is the expected machine type") + assert.False(cluster.Get("nodePools.#(name==\"pool-03\").autoscaling.enabled").Bool(), "has autoscaling disabled") + assert.Equal(int64(2), cluster.Get("nodePools.#(name==\"pool-03\").initialNodeCount").Int(), "has the expected initial node count") + assert.True(cluster.Get("nodePools.#(name==\"pool-03\").management.autoRepair").Bool(), "has autorepair enabled") + assert.True(cluster.Get("nodePools.#(name==\"pool-03\").management.autoUpgrade").Bool(), "has automatic upgrades enabled") + assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "cluster_name": "%s", "node_pool": "pool-03", "sandbox.gke.io/runtime": "gvisor"}`, clusterName), + cluster.Get("nodePools.#(name==\"pool-03\").config.labels").String(), "has the expected labels") + assert.ElementsMatch([]string{"all-node-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-03", clusterName)}, + cluster.Get("nodePools.#(name==\"pool-03\").config.tags").Value().([]interface{}), "has the expected 
network tags") + assert.Equal("172.16.0.0/18", cluster.Get("nodePools.#(name==\"pool-03\").networkConfig.podIpv4CidrBlock").String(), "has the expected pod range") + assert.Equal("test", cluster.Get("nodePools.#(name==\"pool-03\").networkConfig.podRange").String(), "has the expected pod range") + assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-03\").config.imageType").String(), "has the expected image") + assert.Equal("static", cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuManagerPolicy").String(), "has the expected cpuManagerPolicy kubelet config") + assert.True(cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuCfsQuota").Bool(), "has the expected cpuCfsQuota kubelet config") + assert.Equal(int64(20000), cluster.Get("nodePools.#(name==\"pool-03\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls") + + // Pool-04 + assert.Equal("pool-04", cluster.Get("nodePools.#(name==\"pool-04\").name").String(), "pool-4 exists") + assert.False(cluster.Get("nodePools.#(name==\"pool-04\").config.queuedProvisioning.enabled").Bool(), "has queued provisioning not enabled") + + // Pool-05 + assert.Equal("pool-05", cluster.Get("nodePools.#(name==\"pool-05\").name").String(), "pool-5 exists") + assert.True(cluster.Get("nodePools.#(name==\"pool-05\").config.advancedMachineFeatures.enableNestedVirtualization").Bool(), "has enable_nested_virtualization enabled") + + // K8s + gcloud.Runf(t, "container clusters get-credentials %s --region %s --project %s", clusterName, location, projectId) + k8sOpts := k8s.KubectlOptions{} + clusterNodesOp, err := k8s.RunKubectlAndGetOutputE(t, &k8sOpts, "get", "nodes", "-o", "json") + assert.NoError(err) + clusterNodes := testutils.ParseKubectlJSONResult(t, clusterNodesOp) + assert.JSONEq(`[ + { + "effect": "PreferNoSchedule", + "key": "all-pools-example", + "value": "true" + }, + { + "effect": "PreferNoSchedule", + "key": 
"pool-01-example", + "value": "true" + } + ]`, + clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-01\").spec.taints").String(), "has the expected taints") + assert.JSONEq(`[ + { + "effect": "PreferNoSchedule", + "key": "all-pools-example", + "value": "true" + } + ]`, + clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-02\").spec.taints").String(), "has the expected all-pools-example taint") + assert.JSONEq(`[ + { + "effect": "PreferNoSchedule", + "key": "all-pools-example", + "value": "true" + } + ]`, + clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-03\").spec.taints").String(), "has the expected all-pools-example taint") + }) + + bpt.Test() +} diff --git a/test/integration/safer_cluster_iap_bastion/controls/e2e.rb b/test/integration/safer_cluster_iap_bastion/controls/e2e.rb deleted file mode 100644 index de185c4871..0000000000 --- a/test/integration/safer_cluster_iap_bastion/controls/e2e.rb +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -test_command = attribute('test_command') -cluster_version = attribute('cluster_version') -# pre run ssh command so that ssh-keygen can run -%x( #{test_command} ) -control "e2e" do - title "SSH into VM and verify connectivity to GKE" - describe command(test_command) do - its(:exit_status) { should eq 0 } - its(:stderr) { should eq '' } - let!(:data) do - if subject.exit_status == 0 - JSON.parse(subject.stdout) - else - {} - end - end - describe "gke version" do - it "is correct" do - expect(data['gitVersion']).to eq "v#{cluster_version}" - end - end - end -end diff --git a/test/integration/safer_cluster_iap_bastion/inspec.yml b/test/integration/safer_cluster_iap_bastion/inspec.yml deleted file mode 100644 index 1ad18a1c00..0000000000 --- a/test/integration/safer_cluster_iap_bastion/inspec.yml +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -name: safer_cluster -attributes: - - name: test_command - required: true - type: string - - name: cluster_version - required: true - type: string diff --git a/test/integration/safer_cluster_iap_bastion/safer_cluster_iap_bastion_test.go b/test/integration/safer_cluster_iap_bastion/safer_cluster_iap_bastion_test.go new file mode 100644 index 0000000000..2e46f573f9 --- /dev/null +++ b/test/integration/safer_cluster_iap_bastion/safer_cluster_iap_bastion_test.go @@ -0,0 +1,56 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package safer_cluster_iap_bastion + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/stretchr/testify/assert" + "github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/testutils" + gkeutils "github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/utils" +) + +func TestSaferClusterIapBastion(t *testing.T) { + bpt := tft.NewTFBlueprintTest(t, + tft.WithRetryableTerraformErrors(testutils.RetryableTransientErrors, 3, 2*time.Minute), + ) + + bpt.DefineVerify(func(assert *assert.Assertions) { + // Skipping Default Verify as the Verify Stage fails due to change in Client Cert Token + // bpt.DefaultVerify(assert) + gkeutils.TGKEVerify(t, bpt, assert) // Verify Resources + + test_command, _ := strings.CutPrefix(bpt.GetStringOutput("test_command"), "gcloud ") + + // pre run ssh command so that ssh-keygen can run + gcloud.RunCmd(t, test_command, + gcloud.WithCommonArgs([]string{}), + ) + + cluster_version := fmt.Sprintf("v%s", bpt.GetStringOutput("cluster_version")) + + op := gcloud.Run(t, test_command, + gcloud.WithCommonArgs([]string{}), + ) + + assert.Equal(cluster_version, op.Get("gitVersion").String(), "SSH into VM and verify connectivity to GKE") + }) + + bpt.Test() +} diff --git a/test/integration/testutils/cai.go b/test/integration/testutils/cai.go new file mode 100644 index 0000000000..69f819d67d --- /dev/null +++ b/test/integration/testutils/cai.go @@ -0,0 +1,72 @@ +/** + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package utils provides a set of helpers to interact with Cloud Asset Inventory
+package utils
+
+import (
+	"testing"
+	"time"
+
+	"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud"
+	"github.com/tidwall/gjson"
+)
+
+// CmdCfg holds options for a Cloud Asset Inventory query.
+type CmdCfg struct {
+	sleep     int    // minutes to sleep prior to CAI retrieval. default: 2
+	assetType string // asset type to retrieve. default: all
+}
+
+type cmdOption func(*CmdCfg)
+
+// newCmdConfig sets defaults and applies options
+func newCmdConfig(opts ...cmdOption) *CmdCfg {
+	caiOpts := &CmdCfg{
+		sleep:     2,
+		assetType: "",
+	}
+
+	for _, opt := range opts {
+		opt(caiOpts)
+	}
+
+	return caiOpts
+}
+
+// WithSleep sets custom sleep minutes prior to the CAI query.
+func WithSleep(sleep int) cmdOption {
+	return func(f *CmdCfg) {
+		f.sleep = sleep
+	}
+}
+
+// WithAssetType sets the asset type to retrieve.
+func WithAssetType(assetType string) cmdOption {
+	return func(f *CmdCfg) {
+		f.assetType = assetType
+	}
+}
+
+// GetProjectResources returns the cloud asset inventory resources for a project as a gjson.Result
+func GetProjectResources(t testing.TB, project string, opts ...cmdOption) gjson.Result {
+	caiOpts := newCmdConfig(opts...)
+	// CAI can lag behind resource creation; wait before querying.
+	time.Sleep(time.Duration(caiOpts.sleep) * time.Minute)
+	if caiOpts.assetType != "" {
+		return gcloud.Runf(t, "asset list --project=%s --asset-types=%s --content-type=resource", project, caiOpts.assetType)
+	}
+	return gcloud.Runf(t, "asset list --project=%s --content-type=resource", project)
+}
diff --git a/test/integration/workload_identity/controls/gcloud.rb b/test/integration/workload_identity/controls/gcloud.rb deleted file mode 100644 index 1c956052eb..0000000000 --- a/test/integration/workload_identity/controls/gcloud.rb +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
- -project_id = attribute('project_id') -location = attribute('location') -cluster_name = attribute('cluster_name') -wi_gsa_to_k8s_sa = { - attribute('default_wi_email') => attribute('default_wi_ksa_name'), - attribute('existing_ksa_email') => attribute('existing_ksa_name'), - attribute('existing_gsa_email') => attribute('existing_gsa_name') -} - -control "gcloud" do - title "Google Compute Engine GKE configuration" - describe command("gcloud beta --project=#{project_id} container clusters --zone=#{location} describe #{cluster_name} --format=json --format=\"json(nodePools[0].config.workloadMetadataConfig.nodeMetadata)\"") do - its(:exit_status) { should eq 0 } - its(:stderr) { should eq '' } - - let!(:data) do - if subject.exit_status == 0 - JSON.parse(subject.stdout) - else - {} - end - end - - describe "workload metada config" do - it "is secure" do - expect(data['nodePools'][0]["config"]["workloadMetadataConfig"]["nodeMetadata"]).to eq 'GKE_METADATA_SERVER' - end - end - end - - describe command("gcloud beta --project=#{project_id} container clusters --zone=#{location} describe #{cluster_name} --format=json --format=\"json(workloadIdentityConfig)\"") do - its(:exit_status) { should eq 0 } - its(:stderr) { should eq '' } - - let!(:data) do - if subject.exit_status == 0 - JSON.parse(subject.stdout) - else - {} - end - end - - describe "workload identity config" do - it "is has correct namespace" do - expect(data["workloadIdentityConfig"]["identityNamespace"]).to eq "#{project_id}.svc.id.goog" - end - end - end - wi_gsa_to_k8s_sa.each do |gsa_email,ksa_name| - describe command("gcloud iam service-accounts get-iam-policy #{gsa_email} --format=json") do - its(:exit_status) { should eq 0 } - its(:stderr) { should eq '' } - - let!(:iam) do - if subject.exit_status == 0 - JSON.parse(subject.stdout) - else - {} - end - end - it "has expected workload identity user roles" do - expect(iam['bindings'][0]).to include("members" => 
["serviceAccount:#{project_id}.svc.id.goog[default/#{ksa_name}]"], "role" => "roles/iam.workloadIdentityUser") - end - end - end -end diff --git a/test/integration/workload_identity/inspec.yml b/test/integration/workload_identity/inspec.yml deleted file mode 100644 index 61f2f306aa..0000000000 --- a/test/integration/workload_identity/inspec.yml +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: workload_metadata_config -attributes: - - name: cluster_name - required: true - type: string - - name: location - required: true - type: string - - name: project_id - required: true - type: string - - name: default_wi_email - required: true - type: string - - name: default_wi_ksa_name - required: true - type: string - - name: existing_ksa_email - required: true - type: string - - name: existing_ksa_name - required: true - type: string - - name: existing_gsa_email - required: true - type: string - - name: existing_gsa_name - required: true - type: string diff --git a/test/integration/workload_identity/workload_identity_test.go b/test/integration/workload_identity/workload_identity_test.go new file mode 100644 index 0000000000..fe06e5321c --- /dev/null +++ b/test/integration/workload_identity/workload_identity_test.go @@ -0,0 +1,49 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package workload_identity
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud"
+	"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft"
+	"github.com/stretchr/testify/assert"
+	"github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/testutils"
+	gkeutils "github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/utils"
+)
+
+// TestWorkloadIdentity verifies the workload_identity example: the cluster is
+// running, node metadata is served by the GKE metadata server, and the
+// workload identity pool matches the project.
+func TestWorkloadIdentity(t *testing.T) {
+	bpt := tft.NewTFBlueprintTest(t,
+		tft.WithRetryableTerraformErrors(testutils.RetryableTransientErrors, 3, 2*time.Minute),
+	)
+
+	bpt.DefineVerify(func(assert *assert.Assertions) {
+		// Skipping Default Verify as the Verify Stage fails due to change in Client Cert Token
+		// bpt.DefaultVerify(assert)
+		gkeutils.TGKEVerify(t, bpt, assert) // Verify Resources
+
+		projectId := bpt.GetStringOutput("project_id")
+		location := bpt.GetStringOutput("location")
+		clusterName := bpt.GetStringOutput("cluster_name")
+
+		op := gcloud.Runf(t, "container clusters describe %s --zone %s --project %s", clusterName, location, projectId)
+		assert.Contains([]string{"RUNNING", "RECONCILING"}, op.Get("status").String(), "Cluster is Running")
+		assert.Equal("GKE_METADATA", op.Get("nodePools.0.config.workloadMetadataConfig.mode").String(), "workload metadata config is secure")
+		assert.Equal(fmt.Sprintf("%s.svc.id.goog", projectId), op.Get("workloadIdentityConfig.workloadPool").String(), "workload identity config has correct project")
+	})
+
+	bpt.Test()
+}
diff --git a/test/setup/iam.tf b/test/setup/iam.tf index fe97685dd8..fb9f30eb04 100644 --- a/test/setup/iam.tf +++ b/test/setup/iam.tf @@ -34,6 +34,8 @@ locals { "roles/iam.roleAdmin", "roles/iap.admin", "roles/gkehub.admin", + "roles/cloudasset.viewer", + "roles/serviceusage.serviceUsageConsumer" ] # roles as documented https://cloud.google.com/service-mesh/docs/installation-permissions diff --git a/test/setup/main.tf b/test/setup/main.tf index b94c404385..cef4cd1c41 100644 --- a/test/setup/main.tf +++ b/test/setup/main.tf @@ -39,7 +39,8 @@ locals { "iamcredentials.googleapis.com", "gkeconnect.googleapis.com", "privateca.googleapis.com", - "gkehub.googleapis.com" + "gkehub.googleapis.com", + "cloudasset.googleapis.com" ] }