From ca78c3e15024a882e3a1ff5f6ced0597722ddc42 Mon Sep 17 00:00:00 2001 From: sarahalsmiller <100602640+sarahalsmiller@users.noreply.github.com> Date: Mon, 16 Sep 2024 15:52:44 +0000 Subject: [PATCH] backport of commit b3803397f1ddb27056b0410dca98660f0db2b17c --- .changelog/21592.txt | 3 - .changelog/21616.txt | 3 - .changelog/21735.txt | 3 - .github/workflows/backport-assistant.yml | 2 +- .github/workflows/ce-merge-trigger.yml | 5 - ...t-1.19.x.yaml => nightly-test-1.14.x.yaml} | 18 +- .github/workflows/nightly-test-1.15.x.yaml | 9 - ...t-1.18.x.yaml => nightly-test-1.16.x.yaml} | 27 +- .github/workflows/nightly-test-1.17.x.yaml | 9 - .../nightly-test-integ-peering_commontopo.yml | 12 +- .../nightly-test-integrations-1.15.x.yml | 83 +- ...l => nightly-test-integrations-1.16.x.yml} | 169 +- .../nightly-test-integrations-1.17.x.yml | 80 +- .../nightly-test-integrations-1.18.x.yml | 482 --- .../workflows/nightly-test-integrations.yml | 56 +- .github/workflows/test-integrations.yml | 24 +- .release/versions.hcl | 5 +- CHANGELOG.md | 108 - Makefile | 5 +- acl/MockAuthorizer.go | 25 + acl/acl_test.go | 188 ++ acl/authorizer.go | 55 + acl/authorizer_test.go | 28 + acl/chained_authorizer.go | 29 + acl/policy.go | 35 +- acl/policy_authorizer.go | 73 + acl/policy_authorizer_test.go | 42 + acl/policy_test.go | 176 ++ agent/acl_endpoint_test.go | 29 +- agent/ae/ae.go | 8 + agent/agent.go | 67 +- agent/agent_endpoint_test.go | 49 +- agent/catalog_endpoint_test.go | 48 +- agent/config/builder.go | 21 +- agent/config/builder_test.go | 75 + agent/config/runtime_test.go | 18 + agent/config_endpoint_test.go | 21 +- agent/connect/uri.go | 28 + agent/connect/uri_service.go | 12 + agent/connect/uri_signing.go | 6 + agent/connect/uri_signing_test.go | 24 + agent/connect/uri_test.go | 55 + agent/connect/uri_workload_identity.go | 40 + agent/connect/uri_workload_identity_ce.go | 18 + agent/connect/uri_workload_identity_test.go | 31 + agent/consul/config_endpoint.go | 4 +- 
agent/consul/config_replication_test.go | 6 +- agent/consul/leader.go | 145 + agent/consul/leader_ce.go | 17 + agent/consul/leader_ce_test.go | 48 + agent/consul/leader_connect_ca.go | 18 +- agent/consul/leader_connect_ca_test.go | 24 +- agent/consul/leader_registrator_v2.go | 411 +++ agent/consul/leader_registrator_v2_test.go | 583 ++++ agent/consul/leader_test.go | 69 +- agent/consul/options.go | 29 + agent/consul/server.go | 168 +- agent/consul/server_ce.go | 1 + agent/consul/server_grpc.go | 30 +- agent/consul/server_test.go | 28 +- .../testdata/v2-resource-dependencies.md | 51 +- agent/consul/type_registry.go | 8 + .../dataplane/get_envoy_bootstrap_params.go | 76 +- .../get_envoy_bootstrap_params_test.go | 253 ++ .../services/dataplane/server.go | 5 + .../grpc-external/services/resource/delete.go | 17 + .../services/resource/delete_ce.go | 15 + .../services/resource/delete_test.go | 291 +- agent/grpc-external/services/resource/list.go | 4 + .../services/resource/list_by_owner.go | 4 + .../services/resource/list_by_owner_test.go | 2 + .../services/resource/list_test.go | 2 + .../services/resource/mutate_and_validate.go | 4 + .../resource/mutate_and_validate_test.go | 140 +- agent/grpc-external/services/resource/read.go | 4 + .../services/resource/read_test.go | 70 + .../services/resource/server_ce.go | 18 + .../services/resource/testing/builder.go | 64 +- .../services/resource/testing/builder_ce.go | 14 +- .../services/resource/testing/testing_ce.go | 45 + .../grpc-external/services/resource/watch.go | 4 + .../services/resource/watch_test.go | 2 + .../services/resource/write_status_test.go | 62 + .../services/resource/write_test.go | 282 ++ agent/health_endpoint_test.go | 22 +- agent/http.go | 81 +- agent/leafcert/generate.go | 11 +- agent/leafcert/leafcert_test_helpers.go | 48 +- agent/leafcert/structs.go | 19 +- .../proxycfg-sources/catalog/config_source.go | 12 +- .../catalog/config_source_oss.go | 15 + .../catalog/config_source_test.go | 52 +- 
.../catalog/mock_ConfigManager.go | 18 +- .../proxycfg-sources/catalog/mock_Watcher.go | 29 +- agent/proxycfg-sources/local/config_source.go | 12 +- .../local/mock_ConfigManager.go | 18 +- agent/proxycfg-sources/local/sync.go | 4 +- agent/proxycfg/manager.go | 22 +- agent/proxycfg/manager_test.go | 9 +- agent/proxycfg_test.go | 11 +- agent/rpc/peering/service_test.go | 7 +- agent/structs/acl.go | 9 +- agent/structs/acl_templated_policy.go | 42 +- agent/structs/acl_templated_policy_ce.go | 3 + agent/structs/acl_templated_policy_ce_test.go | 15 + .../policies/ce/workload-identity.hcl | 3 + .../schemas/workload-identity.json | 13 + agent/structs/config_entry.go | 9 +- agent/structs/config_entry_test.go | 10 +- agent/structs/connect_ca.go | 8 +- agent/structs/connect_proxy_config.go | 43 + agent/structs/errors.go | 6 + agent/testagent.go | 23 +- agent/ui_endpoint_test.go | 25 +- agent/uiserver/ui_template_data.go | 9 + agent/uiserver/uiserver_test.go | 51 +- agent/xds/accesslogs/accesslogs.go | 34 +- agent/xds/clusters.go | 2 + agent/xds/delta.go | 113 +- agent/xds/proxystateconverter/clusters.go | 1259 ++++++++ agent/xds/proxystateconverter/converter.go | 135 + agent/xds/proxystateconverter/endpoints.go | 674 +++++ .../proxystateconverter/failover_policy.go | 142 + .../proxystateconverter/failover_policy_ce.go | 14 + agent/xds/proxystateconverter/listeners.go | 1678 +++++++++++ .../proxystateconverter/locality_policy.go | 21 + .../proxystateconverter/locality_policy_ce.go | 14 + agent/xds/proxystateconverter/routes.go | 805 +++++ agent/xds/rbac_test.go | 456 +++ agent/xds/resources_test.go | 536 +++- agent/xds/server.go | 16 +- agent/xds/xds_protocol_helpers_test.go | 18 +- agent/xdsv2/cluster_resources.go | 405 +++ agent/xdsv2/endpoint_resources.go | 46 + agent/xdsv2/listener_resources.go | 1142 +++++++ agent/xdsv2/rbac_resources.go | 486 +++ agent/xdsv2/resources.go | 91 + agent/xdsv2/resources_test.go | 182 ++ agent/xdsv2/route_resources.go | 542 ++++ 
...cit-destinations-tproxy-default-bar.golden | 116 + ...destinations-tproxy-default-default.golden | 116 + ...xplicit-destinations-tproxy-foo-bar.golden | 116 + ...cit-destinations-tproxy-foo-default.golden | 116 + .../l4-multi-destination-default-bar.golden | 217 ++ ...4-multi-destination-default-default.golden | 217 ++ .../l4-multi-destination-foo-bar.golden | 217 ++ .../l4-multi-destination-foo-default.golden | 217 ++ ...cit-destinations-tproxy-default-bar.golden | 116 + ...destinations-tproxy-default-default.golden | 116 + ...mplicit-destinations-tproxy-foo-bar.golden | 116 + ...cit-destinations-tproxy-foo-default.golden | 116 + ...on-ip-port-bind-address-default-bar.golden | 115 + ...p-port-bind-address-default-default.golden | 115 + ...nation-ip-port-bind-address-foo-bar.golden | 115 + ...on-ip-port-bind-address-foo-default.golden | 115 + ...nix-socket-bind-address-default-bar.golden | 58 + ...socket-bind-address-default-default.golden | 58 + ...on-unix-socket-bind-address-foo-bar.golden | 58 + ...nix-socket-bind-address-foo-default.golden | 58 + ...icit-destination-tproxy-default-bar.golden | 65 + ...-destination-tproxy-default-default.golden | 65 + ...implicit-destination-tproxy-foo-bar.golden | 65 + ...icit-destination-tproxy-foo-default.golden | 65 + ...mixed-multi-destination-default-bar.golden | 285 ++ ...d-multi-destination-default-default.golden | 285 ++ .../mixed-multi-destination-foo-bar.golden | 285 ++ ...mixed-multi-destination-foo-default.golden | 285 ++ ...cit-destinations-tproxy-default-bar.golden | 320 ++ ...destinations-tproxy-default-default.golden | 320 ++ ...mplicit-destinations-tproxy-foo-bar.golden | 320 ++ ...cit-destinations-tproxy-foo-default.golden | 320 ++ ...icit-destination-tproxy-default-bar.golden | 167 ++ ...-destination-tproxy-default-default.golden | 167 ++ ...implicit-destination-tproxy-foo-bar.golden | 167 ++ ...icit-destination-tproxy-foo-default.golden | 167 ++ ...ltiple-workloads-tproxy-default-bar.golden | 167 ++ 
...le-workloads-tproxy-default-default.golden | 167 ++ ...h-multiple-workloads-tproxy-foo-bar.golden | 167 ++ ...ltiple-workloads-tproxy-foo-default.golden | 167 ++ .../source/l7-expose-paths-default-bar.golden | 87 + .../l7-expose-paths-default-default.golden | 87 + .../source/l7-expose-paths-foo-bar.golden | 87 + .../source/l7-expose-paths-foo-default.golden | 87 + ...and-inbound-connections-default-bar.golden | 127 + ...inbound-connections-default-default.golden | 127 + ...cal-and-inbound-connections-foo-bar.golden | 127 + ...and-inbound-connections-foo-default.golden | 127 + ...ses-with-specific-ports-default-bar.golden | 119 + ...with-specific-ports-default-default.golden | 119 + ...dresses-with-specific-ports-foo-bar.golden | 119 + ...ses-with-specific-ports-foo-default.golden | 119 + ...addresses-without-ports-default-bar.golden | 119 + ...esses-without-ports-default-default.golden | 119 + ...oad-addresses-without-ports-foo-bar.golden | 119 + ...addresses-without-ports-foo-default.golden | 119 + ...ses-with-specific-ports-default-bar.golden | 55 + ...with-specific-ports-default-default.golden | 55 + ...dresses-with-specific-ports-foo-bar.golden | 55 + ...ses-with-specific-ports-foo-default.golden | 55 + ...addresses-without-ports-default-bar.golden | 55 + ...esses-without-ports-default-default.golden | 55 + ...oad-addresses-without-ports-foo-bar.golden | 55 + ...addresses-without-ports-foo-default.golden | 55 + ...oad-with-only-mesh-port-default-bar.golden | 12 + ...with-only-mesh-port-default-default.golden | 12 + ...orkload-with-only-mesh-port-foo-bar.golden | 12 + ...oad-with-only-mesh-port-foo-default.golden | 12 + ...ses-with-specific-ports-default-bar.golden | 63 + ...with-specific-ports-default-default.golden | 63 + ...dresses-with-specific-ports-foo-bar.golden | 63 + ...ses-with-specific-ports-foo-default.golden | 63 + ...addresses-without-ports-default-bar.golden | 95 + ...esses-without-ports-default-default.golden | 95 + 
...oad-addresses-without-ports-foo-bar.golden | 95 + ...addresses-without-ports-foo-default.golden | 95 + ...d-address-without-ports-default-bar.golden | 119 + ...dress-without-ports-default-default.golden | 119 + ...kload-address-without-ports-foo-bar.golden | 119 + ...d-address-without-ports-foo-default.golden | 119 + ...cit-destinations-tproxy-default-bar.golden | 49 + ...destinations-tproxy-default-default.golden | 49 + ...xplicit-destinations-tproxy-foo-bar.golden | 49 + ...cit-destinations-tproxy-foo-default.golden | 49 + .../l4-multi-destination-default-bar.golden | 91 + ...4-multi-destination-default-default.golden | 91 + .../l4-multi-destination-foo-bar.golden | 91 + .../l4-multi-destination-foo-default.golden | 91 + ...cit-destinations-tproxy-default-bar.golden | 49 + ...destinations-tproxy-default-default.golden | 49 + ...mplicit-destinations-tproxy-foo-bar.golden | 49 + ...cit-destinations-tproxy-foo-default.golden | 49 + ...on-ip-port-bind-address-default-bar.golden | 49 + ...p-port-bind-address-default-default.golden | 49 + ...nation-ip-port-bind-address-foo-bar.golden | 49 + ...on-ip-port-bind-address-foo-default.golden | 49 + ...nix-socket-bind-address-default-bar.golden | 28 + ...socket-bind-address-default-default.golden | 28 + ...on-unix-socket-bind-address-foo-bar.golden | 28 + ...nix-socket-bind-address-foo-default.golden | 28 + ...icit-destination-tproxy-default-bar.golden | 28 + ...-destination-tproxy-default-default.golden | 28 + ...implicit-destination-tproxy-foo-bar.golden | 28 + ...icit-destination-tproxy-foo-default.golden | 28 + ...mixed-multi-destination-default-bar.golden | 91 + ...d-multi-destination-default-default.golden | 91 + .../mixed-multi-destination-foo-bar.golden | 91 + ...mixed-multi-destination-foo-default.golden | 91 + ...cit-destinations-tproxy-default-bar.golden | 133 + ...destinations-tproxy-default-default.golden | 133 + ...mplicit-destinations-tproxy-foo-bar.golden | 133 + 
...cit-destinations-tproxy-foo-default.golden | 133 + ...icit-destination-tproxy-default-bar.golden | 70 + ...-destination-tproxy-default-default.golden | 70 + ...implicit-destination-tproxy-foo-bar.golden | 70 + ...icit-destination-tproxy-foo-default.golden | 70 + ...ltiple-workloads-tproxy-default-bar.golden | 70 + ...le-workloads-tproxy-default-default.golden | 70 + ...h-multiple-workloads-tproxy-foo-bar.golden | 70 + ...ltiple-workloads-tproxy-foo-default.golden | 70 + .../source/l7-expose-paths-default-bar.golden | 5 + .../l7-expose-paths-default-default.golden | 5 + .../source/l7-expose-paths-foo-bar.golden | 5 + .../source/l7-expose-paths-foo-default.golden | 5 + ...and-inbound-connections-default-bar.golden | 5 + ...inbound-connections-default-default.golden | 5 + ...cal-and-inbound-connections-foo-bar.golden | 5 + ...and-inbound-connections-foo-default.golden | 5 + ...ses-with-specific-ports-default-bar.golden | 5 + ...with-specific-ports-default-default.golden | 5 + ...dresses-with-specific-ports-foo-bar.golden | 5 + ...ses-with-specific-ports-foo-default.golden | 5 + ...addresses-without-ports-default-bar.golden | 5 + ...esses-without-ports-default-default.golden | 5 + ...oad-addresses-without-ports-foo-bar.golden | 5 + ...addresses-without-ports-foo-default.golden | 5 + ...ses-with-specific-ports-default-bar.golden | 5 + ...with-specific-ports-default-default.golden | 5 + ...dresses-with-specific-ports-foo-bar.golden | 5 + ...ses-with-specific-ports-foo-default.golden | 5 + ...addresses-without-ports-default-bar.golden | 5 + ...esses-without-ports-default-default.golden | 5 + ...oad-addresses-without-ports-foo-bar.golden | 5 + ...addresses-without-ports-foo-default.golden | 5 + ...oad-with-only-mesh-port-default-bar.golden | 5 + ...with-only-mesh-port-default-default.golden | 5 + ...orkload-with-only-mesh-port-foo-bar.golden | 5 + ...oad-with-only-mesh-port-foo-default.golden | 5 + ...ses-with-specific-ports-default-bar.golden | 5 + 
...with-specific-ports-default-default.golden | 5 + ...dresses-with-specific-ports-foo-bar.golden | 5 + ...ses-with-specific-ports-foo-default.golden | 5 + ...addresses-without-ports-default-bar.golden | 5 + ...esses-without-ports-default-default.golden | 5 + ...oad-addresses-without-ports-foo-bar.golden | 5 + ...addresses-without-ports-foo-default.golden | 5 + ...d-address-without-ports-default-bar.golden | 5 + ...dress-without-ports-default-default.golden | 5 + ...kload-address-without-ports-foo-bar.golden | 5 + ...d-address-without-ports-foo-default.golden | 5 + ...cit-destinations-tproxy-default-bar.golden | 90 + ...destinations-tproxy-default-default.golden | 90 + ...xplicit-destinations-tproxy-foo-bar.golden | 90 + ...cit-destinations-tproxy-foo-default.golden | 90 + .../l4-multi-destination-default-bar.golden | 137 + ...4-multi-destination-default-default.golden | 137 + .../l4-multi-destination-foo-bar.golden | 137 + .../l4-multi-destination-foo-default.golden | 137 + ...cit-destinations-tproxy-default-bar.golden | 86 + ...destinations-tproxy-default-default.golden | 86 + ...mplicit-destinations-tproxy-foo-bar.golden | 86 + ...cit-destinations-tproxy-foo-default.golden | 86 + ...on-ip-port-bind-address-default-bar.golden | 47 + ...p-port-bind-address-default-default.golden | 47 + ...nation-ip-port-bind-address-foo-bar.golden | 47 + ...on-ip-port-bind-address-foo-default.golden | 47 + ...nix-socket-bind-address-default-bar.golden | 32 + ...socket-bind-address-default-default.golden | 32 + ...on-unix-socket-bind-address-foo-bar.golden | 32 + ...nix-socket-bind-address-foo-default.golden | 32 + ...icit-destination-tproxy-default-bar.golden | 61 + ...-destination-tproxy-default-default.golden | 61 + ...implicit-destination-tproxy-foo-bar.golden | 61 + ...icit-destination-tproxy-foo-default.golden | 61 + ...mixed-multi-destination-default-bar.golden | 119 + ...d-multi-destination-default-default.golden | 119 + .../mixed-multi-destination-foo-bar.golden | 119 + 
...mixed-multi-destination-foo-default.golden | 119 + ...cit-destinations-tproxy-default-bar.golden | 222 ++ ...destinations-tproxy-default-default.golden | 222 ++ ...mplicit-destinations-tproxy-foo-bar.golden | 222 ++ ...cit-destinations-tproxy-foo-default.golden | 222 ++ ...icit-destination-tproxy-default-bar.golden | 125 + ...-destination-tproxy-default-default.golden | 125 + ...implicit-destination-tproxy-foo-bar.golden | 125 + ...icit-destination-tproxy-foo-default.golden | 125 + ...ltiple-workloads-tproxy-default-bar.golden | 125 + ...le-workloads-tproxy-default-default.golden | 125 + ...h-multiple-workloads-tproxy-foo-bar.golden | 125 + ...ltiple-workloads-tproxy-foo-default.golden | 125 + .../source/l7-expose-paths-default-bar.golden | 201 ++ .../l7-expose-paths-default-default.golden | 201 ++ .../source/l7-expose-paths-foo-bar.golden | 201 ++ .../source/l7-expose-paths-foo-default.golden | 201 ++ ...and-inbound-connections-default-bar.golden | 309 ++ ...inbound-connections-default-default.golden | 309 ++ ...cal-and-inbound-connections-foo-bar.golden | 309 ++ ...and-inbound-connections-foo-default.golden | 309 ++ ...ses-with-specific-ports-default-bar.golden | 443 +++ ...with-specific-ports-default-default.golden | 443 +++ ...dresses-with-specific-ports-foo-bar.golden | 443 +++ ...ses-with-specific-ports-foo-default.golden | 443 +++ ...addresses-without-ports-default-bar.golden | 359 +++ ...esses-without-ports-default-default.golden | 359 +++ ...oad-addresses-without-ports-foo-bar.golden | 359 +++ ...addresses-without-ports-foo-default.golden | 359 +++ ...ses-with-specific-ports-default-bar.golden | 128 + ...with-specific-ports-default-default.golden | 128 + ...dresses-with-specific-ports-foo-bar.golden | 128 + ...ses-with-specific-ports-foo-default.golden | 128 + ...addresses-without-ports-default-bar.golden | 128 + ...esses-without-ports-default-default.golden | 128 + ...oad-addresses-without-ports-foo-bar.golden | 128 + 
...addresses-without-ports-foo-default.golden | 128 + ...oad-with-only-mesh-port-default-bar.golden | 40 + ...with-only-mesh-port-default-default.golden | 40 + ...orkload-with-only-mesh-port-foo-bar.golden | 40 + ...oad-with-only-mesh-port-foo-default.golden | 40 + ...ses-with-specific-ports-default-bar.golden | 206 ++ ...with-specific-ports-default-default.golden | 206 ++ ...dresses-with-specific-ports-foo-bar.golden | 206 ++ ...ses-with-specific-ports-foo-default.golden | 206 ++ ...addresses-without-ports-default-bar.golden | 309 ++ ...esses-without-ports-default-default.golden | 309 ++ ...oad-addresses-without-ports-foo-bar.golden | 309 ++ ...addresses-without-ports-foo-default.golden | 309 ++ ...d-address-without-ports-default-bar.golden | 359 +++ ...dress-without-ports-default-default.golden | 359 +++ ...kload-address-without-ports-foo-bar.golden | 359 +++ ...d-address-without-ports-foo-default.golden | 359 +++ ...cit-destinations-tproxy-default-bar.golden | 5 + ...destinations-tproxy-default-default.golden | 5 + ...xplicit-destinations-tproxy-foo-bar.golden | 5 + ...cit-destinations-tproxy-foo-default.golden | 5 + .../l4-multi-destination-default-bar.golden | 5 + ...4-multi-destination-default-default.golden | 5 + .../l4-multi-destination-foo-bar.golden | 5 + .../l4-multi-destination-foo-default.golden | 5 + ...cit-destinations-tproxy-default-bar.golden | 5 + ...destinations-tproxy-default-default.golden | 5 + ...mplicit-destinations-tproxy-foo-bar.golden | 5 + ...cit-destinations-tproxy-foo-default.golden | 5 + ...on-ip-port-bind-address-default-bar.golden | 5 + ...p-port-bind-address-default-default.golden | 5 + ...nation-ip-port-bind-address-foo-bar.golden | 5 + ...on-ip-port-bind-address-foo-default.golden | 5 + ...nix-socket-bind-address-default-bar.golden | 5 + ...socket-bind-address-default-default.golden | 5 + ...on-unix-socket-bind-address-foo-bar.golden | 5 + ...nix-socket-bind-address-foo-default.golden | 5 + 
...icit-destination-tproxy-default-bar.golden | 5 + ...-destination-tproxy-default-default.golden | 5 + ...implicit-destination-tproxy-foo-bar.golden | 5 + ...icit-destination-tproxy-foo-default.golden | 5 + ...mixed-multi-destination-default-bar.golden | 66 + ...d-multi-destination-default-default.golden | 66 + .../mixed-multi-destination-foo-bar.golden | 66 + ...mixed-multi-destination-foo-default.golden | 66 + ...cit-destinations-tproxy-default-bar.golden | 51 + ...destinations-tproxy-default-default.golden | 51 + ...mplicit-destinations-tproxy-foo-bar.golden | 51 + ...cit-destinations-tproxy-foo-default.golden | 51 + ...icit-destination-tproxy-default-bar.golden | 29 + ...-destination-tproxy-default-default.golden | 29 + ...implicit-destination-tproxy-foo-bar.golden | 29 + ...icit-destination-tproxy-foo-default.golden | 29 + ...ltiple-workloads-tproxy-default-bar.golden | 29 + ...le-workloads-tproxy-default-default.golden | 29 + ...h-multiple-workloads-tproxy-foo-bar.golden | 29 + ...ltiple-workloads-tproxy-foo-default.golden | 29 + .../source/l7-expose-paths-default-bar.golden | 5 + .../l7-expose-paths-default-default.golden | 5 + .../source/l7-expose-paths-foo-bar.golden | 5 + .../source/l7-expose-paths-foo-default.golden | 5 + ...and-inbound-connections-default-bar.golden | 5 + ...inbound-connections-default-default.golden | 5 + ...cal-and-inbound-connections-foo-bar.golden | 5 + ...and-inbound-connections-foo-default.golden | 5 + ...ses-with-specific-ports-default-bar.golden | 5 + ...with-specific-ports-default-default.golden | 5 + ...dresses-with-specific-ports-foo-bar.golden | 5 + ...ses-with-specific-ports-foo-default.golden | 5 + ...addresses-without-ports-default-bar.golden | 5 + ...esses-without-ports-default-default.golden | 5 + ...oad-addresses-without-ports-foo-bar.golden | 5 + ...addresses-without-ports-foo-default.golden | 5 + ...ses-with-specific-ports-default-bar.golden | 5 + ...with-specific-ports-default-default.golden | 5 + 
...dresses-with-specific-ports-foo-bar.golden | 5 + ...ses-with-specific-ports-foo-default.golden | 5 + ...addresses-without-ports-default-bar.golden | 5 + ...esses-without-ports-default-default.golden | 5 + ...oad-addresses-without-ports-foo-bar.golden | 5 + ...addresses-without-ports-foo-default.golden | 5 + ...oad-with-only-mesh-port-default-bar.golden | 5 + ...with-only-mesh-port-default-default.golden | 5 + ...orkload-with-only-mesh-port-foo-bar.golden | 5 + ...oad-with-only-mesh-port-foo-default.golden | 5 + ...ses-with-specific-ports-default-bar.golden | 5 + ...with-specific-ports-default-default.golden | 5 + ...dresses-with-specific-ports-foo-bar.golden | 5 + ...ses-with-specific-ports-foo-default.golden | 5 + ...addresses-without-ports-default-bar.golden | 5 + ...esses-without-ports-default-default.golden | 5 + ...oad-addresses-without-ports-foo-bar.golden | 5 + ...addresses-without-ports-foo-default.golden | 5 + ...d-address-without-ports-default-bar.golden | 5 + ...dress-without-ports-default-default.golden | 5 + ...kload-address-without-ports-foo-bar.golden | 5 + ...d-address-without-ports-foo-default.golden | 5 + api/acl.go | 13 +- api/config_entry_test.go | 6 +- api/go.sum | 2 +- command/acl/templatedpolicy/formatter.go | 2 + command/config/write/config_write_test.go | 25 +- command/resource/apply-grpc/apply_test.go | 2 +- command/resource/apply/apply_test.go | 4 +- command/resource/testdata/nested_data.hcl | 36 +- .../controller-architecture/testing.md | 2 + envoyextensions/xdscommon/ENVOY_VERSIONS | 6 +- go.mod | 4 +- go.sum | 12 - internal/auth/exports.go | 41 + .../auth/internal/controllers/register.go | 18 + .../controllers/trafficpermissions/builder.go | 96 + .../trafficpermissions/controller.go | 361 +++ .../trafficpermissions/controller_test.go | 1219 ++++++++ .../expander/expander_ce.go | 14 + .../expander/expander_ce/expander_ce.go | 35 + .../trafficpermissions/expander/interface.go | 20 + .../trafficpermissions/helpers_ce.go | 32 + 
.../controllers/trafficpermissions/index.go | 43 + .../controllers/trafficpermissions/status.go | 68 + .../traffic_permissions_mapper.go | 73 + .../types/computed_traffic_permissions.go | 77 + .../computed_traffic_permissions_test.go | 147 + internal/auth/internal/types/errors.go | 17 + .../types/namespace_traffic_permissions.go | 68 + .../namespace_traffic_permissions_test.go | 145 + .../types/partition_traffic_permissions.go | 68 + .../partition_traffic_permissions_test.go | 145 + .../internal/types/traffic_permissions.go | 148 + .../types/traffic_permissions_test.go | 1012 +++++++ internal/auth/internal/types/types.go | 28 + internal/auth/internal/types/validate.go | 203 ++ internal/auth/internal/types/validate_ce.go | 25 + .../auth/internal/types/workload_identity.go | 59 + .../internal/types/workload_identity_test.go | 104 + .../helpers/acl_hooks_test_helpers.go | 21 + .../v2beta1/api-service.json | 36 + .../v2beta1/api-workload-1-health.json | 31 + .../v2beta1/api-workload-1.json | 45 + .../v2beta1/api-workload-10-health.json | 31 + .../v2beta1/api-workload-10.json | 45 + .../v2beta1/api-workload-11-health.json | 31 + .../v2beta1/api-workload-11.json | 45 + .../v2beta1/api-workload-12-health.json | 31 + .../v2beta1/api-workload-12.json | 45 + .../v2beta1/api-workload-13-health.json | 31 + .../v2beta1/api-workload-13.json | 45 + .../v2beta1/api-workload-14-health.json | 31 + .../v2beta1/api-workload-14.json | 45 + .../v2beta1/api-workload-15-health.json | 31 + .../v2beta1/api-workload-15.json | 45 + .../v2beta1/api-workload-16-health.json | 31 + .../v2beta1/api-workload-16.json | 45 + .../v2beta1/api-workload-17-health.json | 31 + .../v2beta1/api-workload-17.json | 44 + .../v2beta1/api-workload-18-health.json | 31 + .../v2beta1/api-workload-18.json | 44 + .../v2beta1/api-workload-19-health.json | 31 + .../v2beta1/api-workload-19.json | 44 + .../v2beta1/api-workload-2-health.json | 31 + .../v2beta1/api-workload-2.json | 45 + 
.../v2beta1/api-workload-20-health.json | 31 + .../v2beta1/api-workload-20.json | 44 + .../v2beta1/api-workload-3-health.json | 31 + .../v2beta1/api-workload-3.json | 45 + .../v2beta1/api-workload-4-health.json | 31 + .../v2beta1/api-workload-4.json | 45 + .../v2beta1/api-workload-5-health.json | 31 + .../v2beta1/api-workload-5.json | 45 + .../v2beta1/api-workload-6-health.json | 31 + .../v2beta1/api-workload-6.json | 45 + .../v2beta1/api-workload-7-health.json | 31 + .../v2beta1/api-workload-7.json | 45 + .../v2beta1/api-workload-8-health.json | 31 + .../v2beta1/api-workload-8.json | 45 + .../v2beta1/api-workload-9-health.json | 31 + .../v2beta1/api-workload-9.json | 45 + .../v2beta1/foo-service-endpoints.json | 45 + .../v2beta1/foo-service.json | 23 + .../v2beta1/grpc-api-service.json | 41 + .../v2beta1/http-api-service.json | 28 + .../v2beta1/node-1-health.json | 29 + .../integration_test_data/v2beta1/node-1.json | 25 + .../v2beta1/node-2-health.json | 29 + .../integration_test_data/v2beta1/node-2.json | 25 + .../v2beta1/node-3-health.json | 29 + .../integration_test_data/v2beta1/node-3.json | 25 + .../v2beta1/node-4-health.json | 29 + .../integration_test_data/v2beta1/node-4.json | 25 + internal/catalog/catalogtest/run_test.go | 40 + .../catalogtest/test_integration_v2beta1.go | 764 +++++ .../catalogtest/test_lifecycle_v2beta1.go | 730 +++++ internal/catalog/exports.go | 113 + .../internal/controllers/endpoints/bound.go | 46 + .../controllers/endpoints/bound_test.go | 63 + .../controllers/endpoints/controller.go | 389 +++ .../controllers/endpoints/controller_test.go | 875 ++++++ .../internal/controllers/endpoints/status.go | 61 + .../controllers/failover/controller.go | 420 +++ .../controllers/failover/controller_test.go | 479 +++ .../failover/expander/expander_ce.go | 12 + .../failover/expander/expander_ce/expander.go | 38 + .../expander/expander_ce/expander_test.go | 67 + .../failover/expander/interface.go | 17 + .../controllers/failover/helpers_ce.go | 14 + 
.../internal/controllers/failover/status.go | 109 + .../controllers/nodehealth/controller.go | 104 + .../controllers/nodehealth/controller_test.go | 419 +++ .../internal/controllers/nodehealth/status.go | 54 + .../catalog/internal/controllers/register.go | 20 + .../controllers/workloadhealth/controller.go | 223 ++ .../workloadhealth/controller_test.go | 780 +++++ .../controllers/workloadhealth/status.go | 139 + .../testhelpers/acl_hooks_test_helpers.go | 198 ++ .../types/computed_failover_policy.go | 78 + .../types/computed_failover_policy_test.go | 250 ++ internal/catalog/internal/types/errors.go | 73 + .../catalog/internal/types/errors_test.go | 74 + .../catalog/internal/types/failover_policy.go | 359 +++ .../internal/types/failover_policy_test.go | 741 +++++ .../catalog/internal/types/health_checks.go | 85 + .../internal/types/health_checks_test.go | 207 ++ .../catalog/internal/types/health_status.go | 90 + .../internal/types/health_status_test.go | 274 ++ internal/catalog/internal/types/node.go | 89 + .../internal/types/node_health_status.go | 90 + .../internal/types/node_health_status_test.go | 271 ++ internal/catalog/internal/types/node_test.go | 174 ++ internal/catalog/internal/types/service.go | 132 + .../internal/types/service_endpoints.go | 176 ++ .../internal/types/service_endpoints_test.go | 322 ++ .../catalog/internal/types/service_test.go | 286 ++ .../errDNSPassingWeightOutOfRange.golden | 1 + .../errDNSWarningWeightOutOfRange.golden | 1 + .../errInvalidEndpointsOwnerName.golden | 1 + .../testdata/errInvalidNodeHostFormat.golden | 1 + .../testdata/errInvalidPhysicalPort.golden | 1 + .../testdata/errInvalidPortReference.golden | 1 + .../testdata/errInvalidVirtualPort.golden | 1 + .../errInvalidWorkloadHostFormat.golden | 1 + .../testdata/errLocalityZoneNoRegion.golden | 1 + .../types/testdata/errNotDNSLabel.golden | 1 + .../types/testdata/errNotIPAddress.golden | 1 + .../types/testdata/errTooMuchMesh.golden | 1 + 
.../testdata/errUnixSocketMultiport.golden | 1 + .../testdata/errVirtualPortReused.golden | 1 + internal/catalog/internal/types/types.go | 22 + internal/catalog/internal/types/types_test.go | 47 + internal/catalog/internal/types/validators.go | 411 +++ .../catalog/internal/types/validators_test.go | 844 ++++++ .../catalog/internal/types/virtual_ips.go | 52 + .../internal/types/virtual_ips_test.go | 127 + internal/catalog/internal/types/workload.go | 183 ++ .../catalog/internal/types/workload_test.go | 496 ++++ internal/catalog/workloadselector/acls.go | 47 + .../catalog/workloadselector/acls_test.go | 123 + internal/catalog/workloadselector/gather.go | 114 + .../catalog/workloadselector/gather_test.go | 258 ++ internal/catalog/workloadselector/index.go | 72 + .../catalog/workloadselector/index_test.go | 131 + .../catalog/workloadselector/integ_test.go | 151 + internal/catalog/workloadselector/mapper.go | 45 + .../catalog/workloadselector/mapper_test.go | 180 ++ .../catalog/workloadselector/selecting.go | 16 + .../cache/indexers/ref_indexer_test.go | 16 +- internal/controller/controllertest/builder.go | 6 + .../internal/controllers/link/controller.go | 22 +- .../controllers/link/controller_test.go | 67 + internal/hcp/internal/controllers/register.go | 6 +- internal/hcp/internal/types/link_test.go | 3 +- internal/mesh/exports.go | 52 + .../controllers/apigateways/controller.go | 106 + .../apigateways/controller_test.go | 166 ++ .../apigateways/fetcher/data_fetcher.go | 44 + .../apigateways/fetcher/data_fetcher_test.go | 113 + .../explicitdestinations/controller.go | 319 ++ .../explicitdestinations/controller_test.go | 957 ++++++ .../explicitdestinations/mapper/mapper.go | 74 + .../explicitdestinations/status.go | 121 + .../builder/api_gateway_builder.go | 154 + .../builder/mesh_gateway_builder.go | 426 +++ .../builder/mesh_gateway_builder_test.go | 343 +++ .../controllers/gatewayproxy/controller.go | 288 ++ .../gatewayproxy/controller_test.go | 318 ++ 
.../gatewayproxy/fetcher/data_fetcher.go | 165 + .../gatewayproxy/fetcher/data_fetcher_test.go | 266 ++ .../mapper/apigatewayworkloads.go | 55 + .../mapper/meshgatewayworkloads.go | 51 + .../implicitdestinations/auth_helper_test.go | 88 + .../implicitdestinations/controller.go | 314 ++ .../implicitdestinations/controller_test.go | 1573 ++++++++++ .../controllers/implicitdestinations/index.go | 194 ++ .../implicitdestinations/index_test.go | 256 ++ .../implicitdestinations/mapper.go | 171 ++ .../implicitdestinations/status.go | 7 + .../meshconfiguration/controller.go | 33 + .../meshconfiguration/controller_test.go | 30 + .../controllers/meshgateways/controller.go | 80 + .../proxyconfiguration/controller.go | 188 ++ .../proxyconfiguration/controller_test.go | 358 +++ .../controllers/proxyconfiguration/sort.go | 108 + .../proxyconfiguration/sort_test.go | 160 + .../mesh/internal/controllers/register.go | 60 + .../internal/controllers/routes/controller.go | 208 ++ .../controllers/routes/controller_test.go | 1687 +++++++++++ .../routes/destination_policy_validation.go | 60 + .../destination_policy_validation_test.go | 121 + .../internal/controllers/routes/generate.go | 861 ++++++ .../controllers/routes/generate_test.go | 1977 ++++++++++++ .../controllers/routes/intermediate.go | 72 + .../controllers/routes/loader/loader.go | 320 ++ .../controllers/routes/loader/loader_test.go | 442 +++ .../controllers/routes/loader/memoized.go | 93 + .../controllers/routes/loader/related.go | 233 ++ .../controllers/routes/pending_status.go | 92 + .../controllers/routes/ref_validation.go | 136 + .../controllers/routes/ref_validation_test.go | 275 ++ .../routes/routestest/routestest.go | 104 + .../internal/controllers/routes/sort_rules.go | 230 ++ .../controllers/routes/sort_rules_test.go | 492 +++ .../internal/controllers/routes/status.go | 238 ++ .../mesh/internal/controllers/routes/util.go | 20 + .../routes/xroutemapper/.mockery.yaml | 15 + .../controllers/routes/xroutemapper/util.go | 
58 + .../routes/xroutemapper/xroutemapper.go | 298 ++ .../routes/xroutemapper/xroutemapper_test.go | 731 +++++ ...mock_ResolveFailoverServiceDestinations.go | 95 + .../sidecarproxy/builder/builder.go | 124 + .../sidecarproxy/builder/builder_test.go | 36 + .../builder/destination_multiport_test.go | 266 ++ .../sidecarproxy/builder/destinations.go | 706 +++++ .../sidecarproxy/builder/destinations_test.go | 576 ++++ .../sidecarproxy/builder/expose_paths.go | 153 + .../sidecarproxy/builder/expose_paths_test.go | 111 + .../sidecarproxy/builder/local_app.go | 563 ++++ .../builder/local_app_multiport_test.go | 173 ++ .../sidecarproxy/builder/local_app_test.go | 1122 +++++++ .../sidecarproxy/builder/naming.go | 49 + .../sidecarproxy/builder/routes.go | 595 ++++ ...cit-destinations-tproxy-default-bar.golden | 193 ++ ...destinations-tproxy-default-default.golden | 193 ++ ...xplicit-destinations-tproxy-foo-bar.golden | 193 ++ ...cit-destinations-tproxy-foo-default.golden | 193 ++ .../l4-multi-destination-default-bar.golden | 319 ++ ...4-multi-destination-default-default.golden | 319 ++ .../l4-multi-destination-foo-bar.golden | 319 ++ .../l4-multi-destination-foo-default.golden | 319 ++ ...cit-destinations-tproxy-default-bar.golden | 192 ++ ...destinations-tproxy-default-default.golden | 192 ++ ...mplicit-destinations-tproxy-foo-bar.golden | 192 ++ ...cit-destinations-tproxy-foo-default.golden | 192 ++ ...on-ip-port-bind-address-default-bar.golden | 164 + ...p-port-bind-address-default-default.golden | 164 + ...nation-ip-port-bind-address-foo-bar.golden | 164 + ...on-ip-port-bind-address-foo-default.golden | 164 + ...nix-socket-bind-address-default-bar.golden | 96 + ...socket-bind-address-default-default.golden | 96 + ...on-unix-socket-bind-address-foo-bar.golden | 96 + ...nix-socket-bind-address-foo-default.golden | 96 + ...icit-destination-tproxy-default-bar.golden | 127 + ...-destination-tproxy-default-default.golden | 127 + ...implicit-destination-tproxy-foo-bar.golden | 
127 + ...icit-destination-tproxy-foo-default.golden | 127 + ...mixed-multi-destination-default-bar.golden | 414 +++ ...d-multi-destination-default-default.golden | 414 +++ .../mixed-multi-destination-foo-bar.golden | 414 +++ ...mixed-multi-destination-foo-default.golden | 414 +++ ...cit-destinations-tproxy-default-bar.golden | 494 +++ ...destinations-tproxy-default-default.golden | 494 +++ ...mplicit-destinations-tproxy-foo-bar.golden | 494 +++ ...cit-destinations-tproxy-foo-default.golden | 494 +++ ...icit-destination-tproxy-default-bar.golden | 275 ++ ...-destination-tproxy-default-default.golden | 275 ++ ...implicit-destination-tproxy-foo-bar.golden | 275 ++ ...icit-destination-tproxy-foo-default.golden | 275 ++ ...ltiple-workloads-tproxy-default-bar.golden | 275 ++ ...le-workloads-tproxy-default-default.golden | 275 ++ ...h-multiple-workloads-tproxy-foo-bar.golden | 275 ++ ...ltiple-workloads-tproxy-foo-default.golden | 275 ++ .../source/l7-expose-paths-default-bar.golden | 211 ++ .../l7-expose-paths-default-default.golden | 211 ++ .../source/l7-expose-paths-foo-bar.golden | 211 ++ .../source/l7-expose-paths-foo-default.golden | 211 ++ .../testdata/source/l7-expose-paths.golden | 211 ++ ...and-inbound-connections-default-bar.golden | 303 ++ ...inbound-connections-default-default.golden | 303 ++ ...cal-and-inbound-connections-foo-bar.golden | 303 ++ ...and-inbound-connections-foo-default.golden | 303 ++ .../local-and-inbound-connections.golden | 303 ++ ...ses-with-specific-ports-default-bar.golden | 338 +++ ...with-specific-ports-default-default.golden | 338 +++ ...dresses-with-specific-ports-foo-bar.golden | 338 +++ ...ses-with-specific-ports-foo-default.golden | 338 +++ ...kload-addresses-with-specific-ports.golden | 338 +++ ...addresses-without-ports-default-bar.golden | 290 ++ ...esses-without-ports-default-default.golden | 290 ++ ...oad-addresses-without-ports-foo-bar.golden | 290 ++ ...addresses-without-ports-foo-default.golden | 290 ++ 
...le-workload-addresses-without-ports.golden | 290 ++ ...ses-with-specific-ports-default-bar.golden | 129 + ...with-specific-ports-default-default.golden | 129 + ...dresses-with-specific-ports-foo-bar.golden | 129 + ...ses-with-specific-ports-foo-default.golden | 129 + ...kload-addresses-with-specific-ports.golden | 129 + ...addresses-without-ports-default-bar.golden | 129 + ...esses-without-ports-default-default.golden | 129 + ...oad-addresses-without-ports-foo-bar.golden | 129 + ...addresses-without-ports-foo-default.golden | 129 + ...le-workload-addresses-without-ports.golden | 129 + ...d-address-without-ports-default-bar.golden | 129 + ...dress-without-ports-default-default.golden | 129 + ...kload-address-without-ports-foo-bar.golden | 129 + ...d-address-without-ports-foo-default.golden | 129 + ...ngle-workload-address-without-ports.golden | 129 + ...oad-with-only-mesh-port-default-bar.golden | 60 + ...with-only-mesh-port-default-default.golden | 60 + ...orkload-with-only-mesh-port-foo-bar.golden | 60 + ...oad-with-only-mesh-port-foo-default.golden | 60 + ...ort-l4-workload-with-only-mesh-port.golden | 60 + ...ses-with-specific-ports-default-bar.golden | 182 ++ ...with-specific-ports-default-default.golden | 182 ++ ...dresses-with-specific-ports-foo-bar.golden | 182 ++ ...ses-with-specific-ports-foo-default.golden | 182 ++ ...kload-addresses-with-specific-ports.golden | 182 ++ ...addresses-without-ports-default-bar.golden | 249 ++ ...esses-without-ports-default-default.golden | 249 ++ ...oad-addresses-without-ports-foo-bar.golden | 249 ++ ...addresses-without-ports-foo-default.golden | 249 ++ ...le-workload-addresses-without-ports.golden | 249 ++ ...d-address-without-ports-default-bar.golden | 249 ++ ...dress-without-ports-default-default.golden | 249 ++ ...kload-address-without-ports-foo-bar.golden | 249 ++ ...d-address-without-ports-foo-default.golden | 249 ++ ...ngle-workload-address-without-ports.golden | 249 ++ 
...d-address-without-ports-default-bar.golden | 290 ++ ...dress-without-ports-default-default.golden | 290 ++ ...kload-address-without-ports-foo-bar.golden | 290 ++ ...d-address-without-ports-foo-default.golden | 290 ++ ...ngle-workload-address-without-ports.golden | 290 ++ .../controllers/sidecarproxy/controller.go | 350 +++ .../sidecarproxy/controller_test.go | 1004 +++++++ .../controllers/sidecarproxy/data_fetcher.go | 333 +++ .../sidecarproxy/data_fetcher_test.go | 516 ++++ .../controllers/sidecarproxy/helper_test.go | 87 + .../controllers/sidecarproxy/mapper.go | 210 ++ .../internal/controllers/xds/controller.go | 418 +++ .../controllers/xds/controller_test.go | 1333 +++++++++ .../controllers/xds/endpoint_builder.go | 78 + .../controllers/xds/endpoint_builder_test.go | 347 +++ .../internal/controllers/xds/leaf_cancels.go | 34 + .../internal/controllers/xds/leaf_mapper.go | 39 + .../internal/controllers/xds/mock_updater.go | 122 + .../controllers/xds/proxy_tracker_watch.go | 24 + .../controllers/xds/reconciliation_data.go | 61 + .../internal/controllers/xds/status/status.go | 131 + ...cit-destinations-tproxy-default-bar.golden | 185 ++ ...destinations-tproxy-default-default.golden | 184 ++ ...xplicit-destinations-tproxy-foo-bar.golden | 185 ++ ...cit-destinations-tproxy-foo-default.golden | 185 ++ .../l4-multi-destination-default-bar.golden | 301 ++ ...4-multi-destination-default-default.golden | 300 ++ .../l4-multi-destination-foo-bar.golden | 301 ++ .../l4-multi-destination-foo-default.golden | 301 ++ ...cit-destinations-tproxy-default-bar.golden | 184 ++ ...destinations-tproxy-default-default.golden | 183 ++ ...mplicit-destinations-tproxy-foo-bar.golden | 184 ++ ...cit-destinations-tproxy-foo-default.golden | 184 ++ ...on-ip-port-bind-address-default-bar.golden | 156 + ...p-port-bind-address-default-default.golden | 155 + ...nation-ip-port-bind-address-foo-bar.golden | 156 + ...on-ip-port-bind-address-foo-default.golden | 156 + 
...nix-socket-bind-address-default-bar.golden | 93 + ...socket-bind-address-default-default.golden | 92 + ...on-unix-socket-bind-address-foo-bar.golden | 93 + ...nix-socket-bind-address-foo-default.golden | 93 + ...icit-destination-tproxy-default-bar.golden | 124 + ...-destination-tproxy-default-default.golden | 123 + ...implicit-destination-tproxy-foo-bar.golden | 124 + ...icit-destination-tproxy-foo-default.golden | 124 + ...mixed-multi-destination-default-bar.golden | 380 +++ ...d-multi-destination-default-default.golden | 379 +++ .../mixed-multi-destination-foo-bar.golden | 380 +++ ...mixed-multi-destination-foo-default.golden | 380 +++ ...cit-destinations-tproxy-default-bar.golden | 466 +++ ...destinations-tproxy-default-default.golden | 465 +++ ...mplicit-destinations-tproxy-foo-bar.golden | 466 +++ ...cit-destinations-tproxy-foo-default.golden | 466 +++ ...icit-destination-tproxy-default-bar.golden | 262 ++ ...-destination-tproxy-default-default.golden | 261 ++ ...implicit-destination-tproxy-foo-bar.golden | 262 ++ ...icit-destination-tproxy-foo-default.golden | 262 ++ ...ltiple-workloads-tproxy-default-bar.golden | 262 ++ ...le-workloads-tproxy-default-default.golden | 261 ++ ...h-multiple-workloads-tproxy-foo-bar.golden | 262 ++ ...ltiple-workloads-tproxy-foo-default.golden | 262 ++ .../source/l7-expose-paths-default-bar.golden | 213 ++ .../l7-expose-paths-default-default.golden | 212 ++ .../source/l7-expose-paths-foo-bar.golden | 213 ++ .../source/l7-expose-paths-foo-default.golden | 213 ++ .../testdata/source/l7-expose-paths.golden | 212 ++ ...and-inbound-connections-default-bar.golden | 305 ++ ...inbound-connections-default-default.golden | 304 ++ ...cal-and-inbound-connections-foo-bar.golden | 305 ++ ...and-inbound-connections-foo-default.golden | 305 ++ .../local-and-inbound-connections.golden | 304 ++ ...ses-with-specific-ports-default-bar.golden | 340 +++ ...with-specific-ports-default-default.golden | 339 +++ 
...dresses-with-specific-ports-foo-bar.golden | 340 +++ ...ses-with-specific-ports-foo-default.golden | 340 +++ ...kload-addresses-with-specific-ports.golden | 339 +++ ...addresses-without-ports-default-bar.golden | 292 ++ ...esses-without-ports-default-default.golden | 291 ++ ...oad-addresses-without-ports-foo-bar.golden | 292 ++ ...addresses-without-ports-foo-default.golden | 292 ++ ...le-workload-addresses-without-ports.golden | 291 ++ ...ses-with-specific-ports-default-bar.golden | 131 + ...with-specific-ports-default-default.golden | 130 + ...dresses-with-specific-ports-foo-bar.golden | 131 + ...ses-with-specific-ports-foo-default.golden | 131 + ...kload-addresses-with-specific-ports.golden | 130 + ...addresses-without-ports-default-bar.golden | 131 + ...esses-without-ports-default-default.golden | 130 + ...oad-addresses-without-ports-foo-bar.golden | 131 + ...addresses-without-ports-foo-default.golden | 131 + ...le-workload-addresses-without-ports.golden | 130 + ...ngle-workload-address-without-ports.golden | 128 + ...oad-with-only-mesh-port-default-bar.golden | 62 + ...with-only-mesh-port-default-default.golden | 61 + ...orkload-with-only-mesh-port-foo-bar.golden | 62 + ...oad-with-only-mesh-port-foo-default.golden | 62 + ...ort-l4-workload-with-only-mesh-port.golden | 61 + ...ses-with-specific-ports-default-bar.golden | 184 ++ ...with-specific-ports-default-default.golden | 183 ++ ...dresses-with-specific-ports-foo-bar.golden | 184 ++ ...ses-with-specific-ports-foo-default.golden | 184 ++ ...kload-addresses-with-specific-ports.golden | 183 ++ ...addresses-without-ports-default-bar.golden | 251 ++ ...esses-without-ports-default-default.golden | 250 ++ ...oad-addresses-without-ports-foo-bar.golden | 251 ++ ...addresses-without-ports-foo-default.golden | 251 ++ ...le-workload-addresses-without-ports.golden | 250 ++ ...ngle-workload-address-without-ports.golden | 247 ++ ...d-address-without-ports-default-bar.golden | 292 ++ 
...dress-without-ports-default-default.golden | 291 ++ ...kload-address-without-ports-foo-bar.golden | 292 ++ ...d-address-without-ports-foo-default.golden | 292 ++ ...ngle-workload-address-without-ports.golden | 291 ++ .../mappers/common/workload_selector_util.go | 58 + .../common/workload_selector_util_test.go | 68 + .../workload_selection_mapper.go | 80 + .../workload_selection_mapper_test.go | 145 + .../internal/meshindexes/computed_routes.go | 66 + .../meshindexes/computed_routes_test.go | 169 ++ internal/mesh/internal/types/api_gateway.go | 20 + .../types/computed_explicit_destinations.go | 19 + .../types/computed_implicit_destinations.go | 102 + .../computed_implicit_destinations_test.go | 268 ++ .../types/computed_proxy_configuration.go | 17 + .../mesh/internal/types/computed_routes.go | 152 + .../internal/types/computed_routes_test.go | 199 ++ internal/mesh/internal/types/decoded.go | 36 + .../mesh/internal/types/destination_policy.go | 275 ++ .../internal/types/destination_policy_test.go | 609 ++++ internal/mesh/internal/types/destinations.go | 169 ++ .../types/destinations_configuration.go | 40 + .../types/destinations_configuration_test.go | 90 + .../mesh/internal/types/destinations_test.go | 414 +++ internal/mesh/internal/types/errors.go | 16 + internal/mesh/internal/types/grpc_route.go | 237 ++ .../mesh/internal/types/grpc_route_test.go | 653 ++++ internal/mesh/internal/types/http_route.go | 350 +++ .../mesh/internal/types/http_route_test.go | 911 ++++++ .../mesh/internal/types/intermediate/types.go | 24 + .../mesh/internal/types/mesh_configuration.go | 22 + internal/mesh/internal/types/mesh_gateway.go | 44 + .../mesh/internal/types/mesh_gateway_test.go | 97 + .../internal/types/proxy_configuration.go | 211 ++ .../types/proxy_configuration_test.go | 360 +++ .../internal/types/proxy_state_template.go | 202 ++ .../types/proxy_state_template_test.go | 191 ++ internal/mesh/internal/types/tcp_route.go | 104 + .../mesh/internal/types/tcp_route_test.go | 237 
++ internal/mesh/internal/types/types.go | 27 + internal/mesh/internal/types/types_test.go | 49 + internal/mesh/internal/types/util.go | 100 + internal/mesh/internal/types/xroute.go | 335 +++ internal/mesh/internal/types/xroute_test.go | 559 ++++ .../mesh/proxy-snapshot/proxy_snapshot.go | 20 + .../mesh/proxy-tracker/mock_SessionLimiter.go | 53 + .../mesh/proxy-tracker/proxy_state_exports.go | 50 + .../proxy-tracker/proxy_state_exports_test.go | 77 + internal/mesh/proxy-tracker/proxy_tracker.go | 267 ++ .../mesh/proxy-tracker/proxy_tracker_test.go | 340 +++ internal/multicluster/exports.go | 21 + .../controllers/exportedservices/builder.go | 166 ++ .../exportedservices/controller.go | 390 +++ .../exportedservices/controller_test.go | 976 ++++++ .../exportedservices/expander/expander_ce.go | 12 + .../expander/expander_ce/expander.go | 47 + .../expander/expander_ce/expander_test.go | 57 + .../exportedservices/expander/types/types.go | 10 + .../exportedservices/helpers_ce.go | 14 + .../controllers/exportedservices/status.go | 42 + .../internal/controllers/register.go | 9 + .../controllers/v1compat/controller.go | 9 +- .../controllers/v1compat/controller_test.go | 3 +- .../multicluster/internal/types/decoded.go | 3 + internal/multicluster/internal/types/types.go | 8 + .../multicluster/internal/types/types_ce.go | 12 + internal/resource/demo/demo.go | 18 - .../selectiontracker/selection_tracker.go | 209 ++ .../selection_tracker_test.go | 375 +++ internal/resource/resource_test.go | 8 +- .../resourcehcl/testdata/destinations.golden | 1 + .../resourcehcl/testdata/destinations.hcl | 25 + .../resourcehcl/testdata/no-blocks.golden | 1 + internal/resourcehcl/testdata/no-blocks.hcl | 33 + internal/resourcehcl/unmarshal_test.go | 3 + internal/tenancy/exports.go | 34 + .../tenancy/internal/bridge/tenancy_bridge.go | 76 + .../internal/bridge/tenancy_bridge_ce.go | 21 + .../internal/controllers/common/common.go | 196 ++ .../controllers/namespace/controller.go | 94 + 
.../tenancy/internal/controllers/register.go | 12 + .../internal/controllers/register_ce.go | 15 + internal/tenancy/internal/types/errors.go | 11 + internal/tenancy/internal/types/namespace.go | 69 + internal/tenancy/internal/types/types.go | 10 + internal/tenancy/internal/types/types_ce.go | 12 + internal/tenancy/internal/types/types_test.go | 121 + .../tenancytest/namespace_controller_test.go | 156 + .../tenancy/tenancytest/namespace_test.go | 116 + proto-public/go.mod | 8 + proto-public/go.sum | 20 + .../computed_traffic_permissions.pb.binary.go | 18 + .../computed_traffic_permissions.pb.go | 223 ++ .../computed_traffic_permissions.proto | 22 + ...mputed_traffic_permissions_deepcopy.gen.go | 27 + .../computed_traffic_permissions_json.gen.go | 22 + .../pbauth/v2beta1/resources.rtypes.go | 50 + .../v2beta1/traffic_permission_extras_test.go | 63 + .../v2beta1/traffic_permissions.pb.binary.go | 108 + .../pbauth/v2beta1/traffic_permissions.pb.go | 1194 ++++++++ .../pbauth/v2beta1/traffic_permissions.proto | 141 + .../v2beta1/traffic_permissions_addon.go | 25 + .../traffic_permissions_deepcopy.gen.go | 216 ++ .../v2beta1/traffic_permissions_extras.go | 60 + .../v2beta1/traffic_permissions_json.gen.go | 121 + .../v2beta1/workload_identity.pb.binary.go | 18 + .../pbauth/v2beta1/workload_identity.pb.go | 158 + .../pbauth/v2beta1/workload_identity.proto | 12 + .../v2beta1/workload_identity_deepcopy.gen.go | 27 + .../v2beta1/workload_identity_json.gen.go | 22 + .../computed_failover_policy.pb.binary.go | 18 + .../v2beta1/computed_failover_policy.pb.go | 227 ++ .../v2beta1/computed_failover_policy.proto | 26 + .../computed_failover_policy_deepcopy.gen.go | 27 + .../computed_failover_policy_extras.go | 48 + .../computed_failover_policy_extras_test.go | 70 + .../computed_failover_policy_json.gen.go | 22 + .../v2beta1/failover_policy.pb.binary.go | 38 + .../pbcatalog/v2beta1/failover_policy.pb.go | 467 +++ .../pbcatalog/v2beta1/failover_policy.proto | 60 + 
.../v2beta1/failover_policy_deepcopy.gen.go | 69 + .../v2beta1/failover_policy_extras.go | 15 + .../v2beta1/failover_policy_extras_test.go | 110 + .../v2beta1/failover_policy_json.gen.go | 44 + .../pbcatalog/v2beta1/health.pb.binary.go | 108 + proto-public/pbcatalog/v2beta1/health.pb.go | 1158 ++++++++ proto-public/pbcatalog/v2beta1/health.proto | 105 + .../pbcatalog/v2beta1/health_deepcopy.gen.go | 216 ++ .../pbcatalog/v2beta1/health_json.gen.go | 121 + .../pbcatalog/v2beta1/node.pb.binary.go | 28 + proto-public/pbcatalog/v2beta1/node.pb.go | 247 ++ proto-public/pbcatalog/v2beta1/node.proto | 23 + .../pbcatalog/v2beta1/node_deepcopy.gen.go | 48 + .../pbcatalog/v2beta1/node_json.gen.go | 33 + proto-public/pbcatalog/v2beta1/protocol.pb.go | 171 ++ proto-public/pbcatalog/v2beta1/protocol.proto | 19 + .../pbcatalog/v2beta1/resources.rtypes.go | 85 + .../pbcatalog/v2beta1/selector.pb.binary.go | 18 + proto-public/pbcatalog/v2beta1/selector.pb.go | 185 ++ proto-public/pbcatalog/v2beta1/selector.proto | 13 + .../v2beta1/selector_deepcopy.gen.go | 27 + .../pbcatalog/v2beta1/selector_json.gen.go | 22 + .../pbcatalog/v2beta1/service.pb.binary.go | 28 + proto-public/pbcatalog/v2beta1/service.pb.go | 310 ++ proto-public/pbcatalog/v2beta1/service.proto | 46 + .../pbcatalog/v2beta1/service_addon.go | 120 + .../pbcatalog/v2beta1/service_addon_test.go | 208 ++ .../pbcatalog/v2beta1/service_deepcopy.gen.go | 48 + .../v2beta1/service_endpoints.pb.binary.go | 28 + .../pbcatalog/v2beta1/service_endpoints.pb.go | 336 +++ .../pbcatalog/v2beta1/service_endpoints.proto | 41 + .../v2beta1/service_endpoints_addon.go | 29 + .../v2beta1/service_endpoints_addon_test.go | 52 + .../v2beta1/service_endpoints_deepcopy.gen.go | 48 + .../v2beta1/service_endpoints_json.gen.go | 33 + .../pbcatalog/v2beta1/service_json.gen.go | 33 + .../pbcatalog/v2beta1/vip.pb.binary.go | 28 + proto-public/pbcatalog/v2beta1/vip.pb.go | 247 ++ proto-public/pbcatalog/v2beta1/vip.proto | 24 + 
.../pbcatalog/v2beta1/vip_deepcopy.gen.go | 48 + .../pbcatalog/v2beta1/vip_json.gen.go | 33 + .../pbcatalog/v2beta1/workload.pb.binary.go | 68 + proto-public/pbcatalog/v2beta1/workload.pb.go | 675 +++++ proto-public/pbcatalog/v2beta1/workload.proto | 81 + .../pbcatalog/v2beta1/workload_addon.go | 80 + .../pbcatalog/v2beta1/workload_addon_test.go | 262 ++ .../v2beta1/workload_deepcopy.gen.go | 132 + .../pbcatalog/v2beta1/workload_json.gen.go | 77 + proto-public/pbdataplane/dataplane.pb.go | 296 +- proto-public/pbdataplane/dataplane.proto | 4 +- .../pbmesh/v2beta1/api_gateway.pb.binary.go | 38 + proto-public/pbmesh/v2beta1/api_gateway.pb.go | 398 +++ proto-public/pbmesh/v2beta1/api_gateway.proto | 54 + .../v2beta1/api_gateway_deepcopy.gen.go | 69 + .../pbmesh/v2beta1/api_gateway_json.gen.go | 44 + .../pbmesh/v2beta1/common.pb.binary.go | 28 + proto-public/pbmesh/v2beta1/common.pb.go | 280 ++ proto-public/pbmesh/v2beta1/common.proto | 37 + .../pbmesh/v2beta1/common_deepcopy.gen.go | 48 + .../pbmesh/v2beta1/common_json.gen.go | 33 + ...omputed_explicit_destinations.pb.binary.go | 18 + .../computed_explicit_destinations.pb.go | 180 ++ .../computed_explicit_destinations.proto | 16 + ...uted_explicit_destinations_deepcopy.gen.go | 27 + ...computed_explicit_destinations_json.gen.go | 22 + .../computed_gateway_routes.pb.binary.go | 18 + .../v2beta1/computed_gateway_routes.pb.go | 215 ++ .../v2beta1/computed_gateway_routes.proto | 27 + .../computed_gateway_routes_deepcopy.gen.go | 27 + .../computed_gateway_routes_json.gen.go | 22 + ...omputed_implicit_destinations.pb.binary.go | 28 + .../computed_implicit_destinations.pb.go | 273 ++ .../computed_implicit_destinations.proto | 24 + ...uted_implicit_destinations_deepcopy.gen.go | 48 + ...computed_implicit_destinations_json.gen.go | 33 + .../computed_proxy_configuration.pb.binary.go | 18 + .../computed_proxy_configuration.pb.go | 199 ++ .../computed_proxy_configuration.proto | 21 + ...mputed_proxy_configuration_deepcopy.gen.go | 
27 + .../computed_proxy_configuration_json.gen.go | 22 + .../v2beta1/computed_routes.pb.binary.go | 148 + .../pbmesh/v2beta1/computed_routes.pb.go | 1625 ++++++++++ .../pbmesh/v2beta1/computed_routes.proto | 175 ++ .../v2beta1/computed_routes_deepcopy.gen.go | 300 ++ .../v2beta1/computed_routes_json.gen.go | 165 + .../pbmesh/v2beta1/connection.pb.binary.go | 28 + proto-public/pbmesh/v2beta1/connection.pb.go | 328 ++ proto-public/pbmesh/v2beta1/connection.proto | 30 + .../pbmesh/v2beta1/connection_deepcopy.gen.go | 48 + .../pbmesh/v2beta1/connection_json.gen.go | 33 + .../v2beta1/destination_policy.pb.binary.go | 88 + .../pbmesh/v2beta1/destination_policy.pb.go | 1106 +++++++ .../pbmesh/v2beta1/destination_policy.proto | 163 + .../destination_policy_deepcopy.gen.go | 174 ++ .../v2beta1/destination_policy_json.gen.go | 99 + .../pbmesh/v2beta1/destinations.pb.binary.go | 58 + .../pbmesh/v2beta1/destinations.pb.go | 671 +++++ .../pbmesh/v2beta1/destinations.proto | 80 + .../destinations_configuration.pb.binary.go | 58 + .../v2beta1/destinations_configuration.pb.go | 704 +++++ .../v2beta1/destinations_configuration.proto | 112 + ...destinations_configuration_deepcopy.gen.go | 111 + .../destinations_configuration_json.gen.go | 66 + .../v2beta1/destinations_deepcopy.gen.go | 111 + .../pbmesh/v2beta1/destinations_json.gen.go | 66 + .../pbmesh/v2beta1/expose.pb.binary.go | 28 + proto-public/pbmesh/v2beta1/expose.pb.go | 322 ++ proto-public/pbmesh/v2beta1/expose.proto | 25 + .../pbmesh/v2beta1/expose_deepcopy.gen.go | 48 + .../pbmesh/v2beta1/expose_json.gen.go | 33 + .../pbmesh/v2beta1/grpc_route.pb.binary.go | 78 + proto-public/pbmesh/v2beta1/grpc_route.pb.go | 908 ++++++ proto-public/pbmesh/v2beta1/grpc_route.proto | 145 + .../pbmesh/v2beta1/grpc_route_deepcopy.gen.go | 153 + .../pbmesh/v2beta1/grpc_route_json.gen.go | 88 + .../pbmesh/v2beta1/http_route.pb.binary.go | 118 + proto-public/pbmesh/v2beta1/http_route.pb.go | 1445 +++++++++ 
proto-public/pbmesh/v2beta1/http_route.proto | 263 ++ .../pbmesh/v2beta1/http_route_deepcopy.gen.go | 237 ++ .../pbmesh/v2beta1/http_route_json.gen.go | 132 + .../v2beta1/http_route_retries.pb.binary.go | 18 + .../pbmesh/v2beta1/http_route_retries.pb.go | 211 ++ .../pbmesh/v2beta1/http_route_retries.proto | 26 + .../http_route_retries_deepcopy.gen.go | 27 + .../v2beta1/http_route_retries_json.gen.go | 22 + .../v2beta1/http_route_timeouts.pb.binary.go | 18 + .../pbmesh/v2beta1/http_route_timeouts.pb.go | 191 ++ .../pbmesh/v2beta1/http_route_timeouts.proto | 21 + .../http_route_timeouts_deepcopy.gen.go | 27 + .../v2beta1/http_route_timeouts_json.gen.go | 22 + .../v2beta1/mesh_configuration.pb.binary.go | 18 + .../pbmesh/v2beta1/mesh_configuration.pb.go | 160 + .../pbmesh/v2beta1/mesh_configuration.proto | 14 + .../mesh_configuration_deepcopy.gen.go | 27 + .../v2beta1/mesh_configuration_json.gen.go | 22 + .../pbmesh/v2beta1/mesh_gateway.pb.binary.go | 28 + .../pbmesh/v2beta1/mesh_gateway.pb.go | 289 ++ .../pbmesh/v2beta1/mesh_gateway.proto | 31 + .../v2beta1/mesh_gateway_deepcopy.gen.go | 48 + .../pbmesh/v2beta1/mesh_gateway_json.gen.go | 33 + .../pbproxystate/access_logs.pb.binary.go | 18 + .../v2beta1/pbproxystate/access_logs.pb.go | 327 ++ .../v2beta1/pbproxystate/access_logs.proto | 34 + .../pbproxystate/access_logs_deepcopy.gen.go | 27 + .../pbproxystate/access_logs_json.gen.go | 22 + .../v2beta1/pbproxystate/address.pb.binary.go | 28 + .../pbmesh/v2beta1/pbproxystate/address.pb.go | 253 ++ .../pbmesh/v2beta1/pbproxystate/address.proto | 20 + .../pbproxystate/address_deepcopy.gen.go | 48 + .../v2beta1/pbproxystate/address_json.gen.go | 33 + .../v2beta1/pbproxystate/cluster.pb.binary.go | 268 ++ .../pbmesh/v2beta1/pbproxystate/cluster.pb.go | 2643 +++++++++++++++++ .../pbmesh/v2beta1/pbproxystate/cluster.proto | 197 ++ .../pbproxystate/cluster_deepcopy.gen.go | 552 ++++ .../v2beta1/pbproxystate/cluster_json.gen.go | 297 ++ .../pbproxystate/endpoints.pb.binary.go | 
28 + .../v2beta1/pbproxystate/endpoints.pb.go | 387 +++ .../v2beta1/pbproxystate/endpoints.proto | 31 + .../pbproxystate/endpoints_deepcopy.gen.go | 48 + .../pbproxystate/endpoints_json.gen.go | 33 + .../pbproxystate/escape_hatches.pb.binary.go | 18 + .../v2beta1/pbproxystate/escape_hatches.pb.go | 173 ++ .../v2beta1/pbproxystate/escape_hatches.proto | 11 + .../escape_hatches_deepcopy.gen.go | 27 + .../pbproxystate/escape_hatches_json.gen.go | 22 + .../header_mutations.pb.binary.go | 68 + .../pbproxystate/header_mutations.pb.go | 700 +++++ .../pbproxystate/header_mutations.proto | 50 + .../header_mutations_deepcopy.gen.go | 132 + .../pbproxystate/header_mutations_json.gen.go | 77 + .../v2beta1/pbproxystate/intentions.pb.go | 211 ++ .../pbproxystate/listener.pb.binary.go | 88 + .../v2beta1/pbproxystate/listener.pb.go | 1500 ++++++++++ .../v2beta1/pbproxystate/listener.proto | 171 ++ .../pbproxystate/listener_deepcopy.gen.go | 174 ++ .../v2beta1/pbproxystate/listener_json.gen.go | 99 + .../v2beta1/pbproxystate/protocol.pb.go | 175 ++ .../v2beta1/pbproxystate/protocol.proto | 19 + .../v2beta1/pbproxystate/protocol_test.go | 22 + .../pbproxystate/references.pb.binary.go | 38 + .../v2beta1/pbproxystate/references.pb.go | 382 +++ .../v2beta1/pbproxystate/references.proto | 31 + .../pbproxystate/references_deepcopy.gen.go | 69 + .../pbproxystate/references_json.gen.go | 44 + .../v2beta1/pbproxystate/route.pb.binary.go | 168 ++ .../pbmesh/v2beta1/pbproxystate/route.pb.go | 1830 ++++++++++++ .../pbmesh/v2beta1/pbproxystate/route.proto | 134 + .../pbproxystate/route_deepcopy.gen.go | 342 +++ .../v2beta1/pbproxystate/route_json.gen.go | 187 ++ .../traffic_permissions.pb.binary.go | 78 + .../pbproxystate/traffic_permissions.pb.go | 799 +++++ .../pbproxystate/traffic_permissions.proto | 64 + .../traffic_permissions_deepcopy.gen.go | 153 + .../traffic_permissions_json.gen.go | 88 + .../transport_socket.pb.binary.go | 138 + .../pbproxystate/transport_socket.pb.go | 1507 
++++++++++ .../pbproxystate/transport_socket.proto | 141 + .../transport_socket_deepcopy.gen.go | 279 ++ .../pbproxystate/transport_socket_json.gen.go | 154 + .../v2beta1/proxy_configuration.pb.binary.go | 68 + .../pbmesh/v2beta1/proxy_configuration.pb.go | 1227 ++++++++ .../pbmesh/v2beta1/proxy_configuration.proto | 173 ++ .../v2beta1/proxy_configuration_addon.go | 14 + .../v2beta1/proxy_configuration_addon_test.go | 53 + .../proxy_configuration_deepcopy.gen.go | 132 + .../v2beta1/proxy_configuration_json.gen.go | 77 + .../pbmesh/v2beta1/proxy_state.pb.binary.go | 28 + proto-public/pbmesh/v2beta1/proxy_state.pb.go | 551 ++++ proto-public/pbmesh/v2beta1/proxy_state.proto | 56 + .../v2beta1/proxy_state_deepcopy.gen.go | 48 + .../pbmesh/v2beta1/proxy_state_json.gen.go | 33 + .../pbmesh/v2beta1/resources.rtypes.go | 127 + proto-public/pbmesh/v2beta1/routing.pb.go | 183 ++ proto-public/pbmesh/v2beta1/routing.proto | 38 + .../pbmesh/v2beta1/tcp_route.pb.binary.go | 38 + proto-public/pbmesh/v2beta1/tcp_route.pb.go | 362 +++ proto-public/pbmesh/v2beta1/tcp_route.proto | 56 + .../pbmesh/v2beta1/tcp_route_deepcopy.gen.go | 69 + .../pbmesh/v2beta1/tcp_route_json.gen.go | 44 + proto-public/pbmesh/v2beta1/xroute_addons.go | 91 + .../pbmesh/v2beta1/xroute_addons_test.go | 173 ++ .../v2beta1/resources.rtypes.go | 22 + .../v2beta1/sameness_group.pb.binary.go | 28 + .../v2beta1/sameness_group.pb.go | 292 ++ .../v2beta1/sameness_group.proto | 22 + .../v2beta1/sameness_group_deepcopy.gen.go | 48 + .../v2beta1/sameness_group_json.gen.go | 33 + .../pbtenancy/v2beta1/namespace.pb.binary.go | 18 + .../pbtenancy/v2beta1/namespace.pb.go | 174 ++ .../pbtenancy/v2beta1/namespace.proto | 19 + .../v2beta1/namespace_deepcopy.gen.go | 27 + .../pbtenancy/v2beta1/namespace_json.gen.go | 22 + .../pbtenancy/v2beta1/partition.pb.binary.go | 18 + .../pbtenancy/v2beta1/partition.pb.go | 183 ++ .../pbtenancy/v2beta1/partition.proto | 20 + .../v2beta1/partition_deepcopy.gen.go | 27 + 
.../pbtenancy/v2beta1/partition_json.gen.go | 22 + .../pbtenancy/v2beta1/resources.rtypes.go | 29 + proto/private/pbconnect/connect.gen.go | 4 + proto/private/pbconnect/connect.pb.go | 100 +- proto/private/pbconnect/connect.proto | 6 +- proto/private/pbdemo/v2/demo.pb.binary.go | 10 - proto/private/pbdemo/v2/demo.pb.go | 252 +- proto/private/pbdemo/v2/demo.proto | 12 - proto/private/pbdemo/v2/resources.rtypes.go | 11 +- test-integ/go.mod | 4 +- test-integ/go.sum | 16 - test/integration/consul-container/go.mod | 6 +- test/integration/consul-container/go.sum | 14 - .../consul-container/libs/assert/service.go | 69 +- testing/deployer/go.mod | 2 +- .../deployer/sprawl/sprawltest/test_test.go | 7 +- testing/deployer/topology/default_versions.go | 9 +- testing/deployer/util/v2.go | 7 + ui/package.json | 3 +- ui/yarn.lock | 10 +- .../content/api-docs/acl/binding-rules.mdx | 4 +- website/content/api-docs/acl/policies.mdx | 2 +- .../improving-consul-resilience.mdx | 2 +- .../content/docs/connect/proxies/envoy.mdx | 1 - website/content/docs/k8s/helm.mdx | 41 +- .../docs/release-notes/consul-k8s/v1_2_x.mdx | 8 - .../docs/release-notes/consul-k8s/v1_3_x.mdx | 2 - .../docs/release-notes/consul-k8s/v1_4_x.mdx | 7 - .../docs/release-notes/consul/v1_19_x.mdx | 2 +- .../docs/security/security-models/core.mdx | 16 +- .../docs/security/security-models/index.mdx | 6 +- .../docs/security/security-models/nia.mdx | 16 +- .../cli-http-api-partition-options.mdx | 2 +- .../http-api-body-options-partition.mdx | 2 +- .../http-api-query-parms-partition.mdx | 2 +- .../architecture/cluster-peering-diagram.png | Bin 30403 -> 0 bytes .../consul-singleDC-redundancyzones.png | Bin 54779 -> 0 bytes 1298 files changed, 187059 insertions(+), 2190 deletions(-) delete mode 100644 .changelog/21592.txt delete mode 100644 .changelog/21616.txt delete mode 100644 .changelog/21735.txt rename .github/workflows/{nightly-test-1.19.x.yaml => nightly-test-1.14.x.yaml} (96%) rename 
.github/workflows/{nightly-test-1.18.x.yaml => nightly-test-1.16.x.yaml} (93%) rename .github/workflows/{nightly-test-integrations-1.19.x.yml => nightly-test-integrations-1.16.x.yml} (67%) delete mode 100644 .github/workflows/nightly-test-integrations-1.18.x.yml create mode 100644 agent/connect/uri_workload_identity.go create mode 100644 agent/connect/uri_workload_identity_ce.go create mode 100644 agent/connect/uri_workload_identity_test.go create mode 100644 agent/consul/leader_ce.go create mode 100644 agent/consul/leader_registrator_v2.go create mode 100644 agent/consul/leader_registrator_v2_test.go create mode 100644 agent/grpc-external/services/resource/delete_ce.go create mode 100644 agent/proxycfg-sources/catalog/config_source_oss.go create mode 100644 agent/structs/acltemplatedpolicy/policies/ce/workload-identity.hcl create mode 100644 agent/structs/acltemplatedpolicy/schemas/workload-identity.json create mode 100644 agent/xds/proxystateconverter/clusters.go create mode 100644 agent/xds/proxystateconverter/converter.go create mode 100644 agent/xds/proxystateconverter/endpoints.go create mode 100644 agent/xds/proxystateconverter/failover_policy.go create mode 100644 agent/xds/proxystateconverter/failover_policy_ce.go create mode 100644 agent/xds/proxystateconverter/listeners.go create mode 100644 agent/xds/proxystateconverter/locality_policy.go create mode 100644 agent/xds/proxystateconverter/locality_policy_ce.go create mode 100644 agent/xds/proxystateconverter/routes.go create mode 100644 agent/xdsv2/cluster_resources.go create mode 100644 agent/xdsv2/endpoint_resources.go create mode 100644 agent/xdsv2/listener_resources.go create mode 100644 agent/xdsv2/rbac_resources.go create mode 100644 agent/xdsv2/resources.go create mode 100644 agent/xdsv2/resources_test.go create mode 100644 agent/xdsv2/route_resources.go create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-implicit-and-explicit-destinations-tproxy-default-bar.golden create mode 100644 
agent/xdsv2/testdata/clusters/destination/l4-implicit-and-explicit-destinations-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-implicit-and-explicit-destinations-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-implicit-and-explicit-destinations-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multi-destination-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multi-destination-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multi-destination-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multi-destination-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multiple-implicit-destinations-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multiple-implicit-destinations-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multiple-implicit-destinations-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-multiple-implicit-destinations-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-ip-port-bind-address-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-ip-port-bind-address-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-ip-port-bind-address-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-ip-port-bind-address-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-unix-socket-bind-address-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-unix-socket-bind-address-default-default.golden 
create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-unix-socket-bind-address-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-destination-unix-socket-bind-address-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-implicit-destination-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-implicit-destination-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-implicit-destination-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/l4-single-implicit-destination-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/mixed-multi-destination-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/mixed-multi-destination-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/mixed-multi-destination-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/mixed-multi-destination-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-default.golden create mode 100644 
agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/l7-expose-paths-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/l7-expose-paths-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/l7-expose-paths-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/l7-expose-paths-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/local-and-inbound-connections-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/local-and-inbound-connections-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/local-and-inbound-connections-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/local-and-inbound-connections-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 
agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-workload-with-only-mesh-port-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-workload-with-only-mesh-port-default-default.golden create 
mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-workload-with-only-mesh-port-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l4-workload-with-only-mesh-port-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/multiport-l7-multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/single-workload-address-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/single-workload-address-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/clusters/source/single-workload-address-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/clusters/source/single-workload-address-without-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-implicit-and-explicit-destinations-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-implicit-and-explicit-destinations-tproxy-default-default.golden 
create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-implicit-and-explicit-destinations-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-implicit-and-explicit-destinations-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multi-destination-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multi-destination-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multi-destination-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multi-destination-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multiple-implicit-destinations-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multiple-implicit-destinations-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multiple-implicit-destinations-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-multiple-implicit-destinations-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-ip-port-bind-address-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-ip-port-bind-address-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-ip-port-bind-address-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-ip-port-bind-address-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-unix-socket-bind-address-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-unix-socket-bind-address-default-default.golden create mode 100644 
agent/xdsv2/testdata/endpoints/destination/l4-single-destination-unix-socket-bind-address-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-destination-unix-socket-bind-address-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-implicit-destination-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-implicit-destination-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-implicit-destination-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/l4-single-implicit-destination-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/mixed-multi-destination-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/mixed-multi-destination-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/mixed-multi-destination-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/mixed-multi-destination-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-default.golden create mode 100644 
agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/l7-expose-paths-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/l7-expose-paths-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/l7-expose-paths-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/l7-expose-paths-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/local-and-inbound-connections-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/local-and-inbound-connections-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/local-and-inbound-connections-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/local-and-inbound-connections-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 
agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-workload-with-only-mesh-port-default-bar.golden create mode 100644 
agent/xdsv2/testdata/endpoints/source/multiport-l4-workload-with-only-mesh-port-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-workload-with-only-mesh-port-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l4-workload-with-only-mesh-port-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/multiport-l7-multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/single-workload-address-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/single-workload-address-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/single-workload-address-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/endpoints/source/single-workload-address-without-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-implicit-and-explicit-destinations-tproxy-default-bar.golden create 
mode 100644 agent/xdsv2/testdata/listeners/destination/l4-implicit-and-explicit-destinations-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-implicit-and-explicit-destinations-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-implicit-and-explicit-destinations-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multi-destination-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multi-destination-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multi-destination-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multi-destination-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multiple-implicit-destinations-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multiple-implicit-destinations-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multiple-implicit-destinations-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-multiple-implicit-destinations-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-ip-port-bind-address-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-ip-port-bind-address-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-ip-port-bind-address-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-ip-port-bind-address-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-unix-socket-bind-address-default-bar.golden create mode 100644 
agent/xdsv2/testdata/listeners/destination/l4-single-destination-unix-socket-bind-address-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-unix-socket-bind-address-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-destination-unix-socket-bind-address-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-implicit-destination-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-implicit-destination-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-implicit-destination-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/l4-single-implicit-destination-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/mixed-multi-destination-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/mixed-multi-destination-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/mixed-multi-destination-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/mixed-multi-destination-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-bar.golden create mode 100644 
agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/l7-expose-paths-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/l7-expose-paths-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/l7-expose-paths-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/l7-expose-paths-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/local-and-inbound-connections-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/local-and-inbound-connections-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/local-and-inbound-connections-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/local-and-inbound-connections-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 
agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 
agent/xdsv2/testdata/listeners/source/multiport-l4-workload-with-only-mesh-port-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-workload-with-only-mesh-port-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-workload-with-only-mesh-port-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l4-workload-with-only-mesh-port-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/multiport-l7-multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/single-workload-address-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/single-workload-address-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/listeners/source/single-workload-address-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/listeners/source/single-workload-address-without-ports-foo-default.golden create mode 100644 
agent/xdsv2/testdata/routes/destination/l4-implicit-and-explicit-destinations-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-implicit-and-explicit-destinations-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-implicit-and-explicit-destinations-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-implicit-and-explicit-destinations-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-multi-destination-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-multi-destination-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-multi-destination-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-multi-destination-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-multiple-implicit-destinations-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-multiple-implicit-destinations-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-multiple-implicit-destinations-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-multiple-implicit-destinations-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-ip-port-bind-address-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-ip-port-bind-address-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-ip-port-bind-address-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-ip-port-bind-address-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-unix-socket-bind-address-default-bar.golden create mode 100644 
agent/xdsv2/testdata/routes/destination/l4-single-destination-unix-socket-bind-address-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-unix-socket-bind-address-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-destination-unix-socket-bind-address-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-implicit-destination-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-implicit-destination-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-implicit-destination-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/l4-single-implicit-destination-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/mixed-multi-destination-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/mixed-multi-destination-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/mixed-multi-destination-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/mixed-multi-destination-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-bar.golden create mode 100644 
agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/l7-expose-paths-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/l7-expose-paths-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/l7-expose-paths-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/l7-expose-paths-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/local-and-inbound-connections-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/local-and-inbound-connections-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/local-and-inbound-connections-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/local-and-inbound-connections-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 
agent/xdsv2/testdata/routes/source/multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-workload-with-only-mesh-port-default-bar.golden create mode 100644 
agent/xdsv2/testdata/routes/source/multiport-l4-workload-with-only-mesh-port-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-workload-with-only-mesh-port-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l4-workload-with-only-mesh-port-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/multiport-l7-multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/single-workload-address-without-ports-default-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/single-workload-address-without-ports-default-default.golden create mode 100644 agent/xdsv2/testdata/routes/source/single-workload-address-without-ports-foo-bar.golden create mode 100644 agent/xdsv2/testdata/routes/source/single-workload-address-without-ports-foo-default.golden create mode 100644 internal/auth/exports.go create mode 100644 internal/auth/internal/controllers/register.go create mode 100644 
internal/auth/internal/controllers/trafficpermissions/builder.go create mode 100644 internal/auth/internal/controllers/trafficpermissions/controller.go create mode 100644 internal/auth/internal/controllers/trafficpermissions/controller_test.go create mode 100644 internal/auth/internal/controllers/trafficpermissions/expander/expander_ce.go create mode 100644 internal/auth/internal/controllers/trafficpermissions/expander/expander_ce/expander_ce.go create mode 100644 internal/auth/internal/controllers/trafficpermissions/expander/interface.go create mode 100644 internal/auth/internal/controllers/trafficpermissions/helpers_ce.go create mode 100644 internal/auth/internal/controllers/trafficpermissions/index.go create mode 100644 internal/auth/internal/controllers/trafficpermissions/status.go create mode 100644 internal/auth/internal/mappers/trafficpermissionsmapper/traffic_permissions_mapper.go create mode 100644 internal/auth/internal/types/computed_traffic_permissions.go create mode 100644 internal/auth/internal/types/computed_traffic_permissions_test.go create mode 100644 internal/auth/internal/types/errors.go create mode 100644 internal/auth/internal/types/namespace_traffic_permissions.go create mode 100644 internal/auth/internal/types/namespace_traffic_permissions_test.go create mode 100644 internal/auth/internal/types/partition_traffic_permissions.go create mode 100644 internal/auth/internal/types/partition_traffic_permissions_test.go create mode 100644 internal/auth/internal/types/traffic_permissions.go create mode 100644 internal/auth/internal/types/traffic_permissions_test.go create mode 100644 internal/auth/internal/types/types.go create mode 100644 internal/auth/internal/types/validate.go create mode 100644 internal/auth/internal/types/validate_ce.go create mode 100644 internal/auth/internal/types/workload_identity.go create mode 100644 internal/auth/internal/types/workload_identity_test.go create mode 100644 
internal/catalog/catalogtest/helpers/acl_hooks_test_helpers.go create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-service.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-1-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-1.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-10-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-10.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-11-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-11.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-12-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-12.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-13-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-13.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-14-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-14.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-15-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-15.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-16-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-16.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-17-health.json create mode 100644 
internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-17.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-18-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-18.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-19-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-19.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-2-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-2.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-20-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-20.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-3-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-3.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-4-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-4.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-5-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-5.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-6-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-6.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-7-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-7.json create mode 100644 
internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-8-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-8.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-9-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/api-workload-9.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/foo-service-endpoints.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/foo-service.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/grpc-api-service.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/http-api-service.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/node-1-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/node-1.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/node-2-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/node-2.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/node-3-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/node-3.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/node-4-health.json create mode 100644 internal/catalog/catalogtest/integration_test_data/v2beta1/node-4.json create mode 100644 internal/catalog/catalogtest/run_test.go create mode 100644 internal/catalog/catalogtest/test_integration_v2beta1.go create mode 100644 internal/catalog/catalogtest/test_lifecycle_v2beta1.go create mode 100644 internal/catalog/exports.go create mode 100644 internal/catalog/internal/controllers/endpoints/bound.go create mode 100644 internal/catalog/internal/controllers/endpoints/bound_test.go create mode 100644 
internal/catalog/internal/controllers/endpoints/controller.go create mode 100644 internal/catalog/internal/controllers/endpoints/controller_test.go create mode 100644 internal/catalog/internal/controllers/endpoints/status.go create mode 100644 internal/catalog/internal/controllers/failover/controller.go create mode 100644 internal/catalog/internal/controllers/failover/controller_test.go create mode 100644 internal/catalog/internal/controllers/failover/expander/expander_ce.go create mode 100644 internal/catalog/internal/controllers/failover/expander/expander_ce/expander.go create mode 100644 internal/catalog/internal/controllers/failover/expander/expander_ce/expander_test.go create mode 100644 internal/catalog/internal/controllers/failover/expander/interface.go create mode 100644 internal/catalog/internal/controllers/failover/helpers_ce.go create mode 100644 internal/catalog/internal/controllers/failover/status.go create mode 100644 internal/catalog/internal/controllers/nodehealth/controller.go create mode 100644 internal/catalog/internal/controllers/nodehealth/controller_test.go create mode 100644 internal/catalog/internal/controllers/nodehealth/status.go create mode 100644 internal/catalog/internal/controllers/register.go create mode 100644 internal/catalog/internal/controllers/workloadhealth/controller.go create mode 100644 internal/catalog/internal/controllers/workloadhealth/controller_test.go create mode 100644 internal/catalog/internal/controllers/workloadhealth/status.go create mode 100644 internal/catalog/internal/testhelpers/acl_hooks_test_helpers.go create mode 100644 internal/catalog/internal/types/computed_failover_policy.go create mode 100644 internal/catalog/internal/types/computed_failover_policy_test.go create mode 100644 internal/catalog/internal/types/errors.go create mode 100644 internal/catalog/internal/types/errors_test.go create mode 100644 internal/catalog/internal/types/failover_policy.go create mode 100644 
internal/catalog/internal/types/failover_policy_test.go create mode 100644 internal/catalog/internal/types/health_checks.go create mode 100644 internal/catalog/internal/types/health_checks_test.go create mode 100644 internal/catalog/internal/types/health_status.go create mode 100644 internal/catalog/internal/types/health_status_test.go create mode 100644 internal/catalog/internal/types/node.go create mode 100644 internal/catalog/internal/types/node_health_status.go create mode 100644 internal/catalog/internal/types/node_health_status_test.go create mode 100644 internal/catalog/internal/types/node_test.go create mode 100644 internal/catalog/internal/types/service.go create mode 100644 internal/catalog/internal/types/service_endpoints.go create mode 100644 internal/catalog/internal/types/service_endpoints_test.go create mode 100644 internal/catalog/internal/types/service_test.go create mode 100644 internal/catalog/internal/types/testdata/errDNSPassingWeightOutOfRange.golden create mode 100644 internal/catalog/internal/types/testdata/errDNSWarningWeightOutOfRange.golden create mode 100644 internal/catalog/internal/types/testdata/errInvalidEndpointsOwnerName.golden create mode 100644 internal/catalog/internal/types/testdata/errInvalidNodeHostFormat.golden create mode 100644 internal/catalog/internal/types/testdata/errInvalidPhysicalPort.golden create mode 100644 internal/catalog/internal/types/testdata/errInvalidPortReference.golden create mode 100644 internal/catalog/internal/types/testdata/errInvalidVirtualPort.golden create mode 100644 internal/catalog/internal/types/testdata/errInvalidWorkloadHostFormat.golden create mode 100644 internal/catalog/internal/types/testdata/errLocalityZoneNoRegion.golden create mode 100644 internal/catalog/internal/types/testdata/errNotDNSLabel.golden create mode 100644 internal/catalog/internal/types/testdata/errNotIPAddress.golden create mode 100644 internal/catalog/internal/types/testdata/errTooMuchMesh.golden create mode 100644 
internal/catalog/internal/types/testdata/errUnixSocketMultiport.golden create mode 100644 internal/catalog/internal/types/testdata/errVirtualPortReused.golden create mode 100644 internal/catalog/internal/types/types.go create mode 100644 internal/catalog/internal/types/types_test.go create mode 100644 internal/catalog/internal/types/validators.go create mode 100644 internal/catalog/internal/types/validators_test.go create mode 100644 internal/catalog/internal/types/virtual_ips.go create mode 100644 internal/catalog/internal/types/virtual_ips_test.go create mode 100644 internal/catalog/internal/types/workload.go create mode 100644 internal/catalog/internal/types/workload_test.go create mode 100644 internal/catalog/workloadselector/acls.go create mode 100644 internal/catalog/workloadselector/acls_test.go create mode 100644 internal/catalog/workloadselector/gather.go create mode 100644 internal/catalog/workloadselector/gather_test.go create mode 100644 internal/catalog/workloadselector/index.go create mode 100644 internal/catalog/workloadselector/index_test.go create mode 100644 internal/catalog/workloadselector/integ_test.go create mode 100644 internal/catalog/workloadselector/mapper.go create mode 100644 internal/catalog/workloadselector/mapper_test.go create mode 100644 internal/catalog/workloadselector/selecting.go create mode 100644 internal/mesh/exports.go create mode 100644 internal/mesh/internal/controllers/apigateways/controller.go create mode 100644 internal/mesh/internal/controllers/apigateways/controller_test.go create mode 100644 internal/mesh/internal/controllers/apigateways/fetcher/data_fetcher.go create mode 100644 internal/mesh/internal/controllers/apigateways/fetcher/data_fetcher_test.go create mode 100644 internal/mesh/internal/controllers/explicitdestinations/controller.go create mode 100644 internal/mesh/internal/controllers/explicitdestinations/controller_test.go create mode 100644 
internal/mesh/internal/controllers/explicitdestinations/mapper/mapper.go create mode 100644 internal/mesh/internal/controllers/explicitdestinations/status.go create mode 100644 internal/mesh/internal/controllers/gatewayproxy/builder/api_gateway_builder.go create mode 100644 internal/mesh/internal/controllers/gatewayproxy/builder/mesh_gateway_builder.go create mode 100644 internal/mesh/internal/controllers/gatewayproxy/builder/mesh_gateway_builder_test.go create mode 100644 internal/mesh/internal/controllers/gatewayproxy/controller.go create mode 100644 internal/mesh/internal/controllers/gatewayproxy/controller_test.go create mode 100644 internal/mesh/internal/controllers/gatewayproxy/fetcher/data_fetcher.go create mode 100644 internal/mesh/internal/controllers/gatewayproxy/fetcher/data_fetcher_test.go create mode 100644 internal/mesh/internal/controllers/gatewayproxy/mapper/apigatewayworkloads.go create mode 100644 internal/mesh/internal/controllers/gatewayproxy/mapper/meshgatewayworkloads.go create mode 100644 internal/mesh/internal/controllers/implicitdestinations/auth_helper_test.go create mode 100644 internal/mesh/internal/controllers/implicitdestinations/controller.go create mode 100644 internal/mesh/internal/controllers/implicitdestinations/controller_test.go create mode 100644 internal/mesh/internal/controllers/implicitdestinations/index.go create mode 100644 internal/mesh/internal/controllers/implicitdestinations/index_test.go create mode 100644 internal/mesh/internal/controllers/implicitdestinations/mapper.go create mode 100644 internal/mesh/internal/controllers/implicitdestinations/status.go create mode 100644 internal/mesh/internal/controllers/meshconfiguration/controller.go create mode 100644 internal/mesh/internal/controllers/meshconfiguration/controller_test.go create mode 100644 internal/mesh/internal/controllers/meshgateways/controller.go create mode 100644 internal/mesh/internal/controllers/proxyconfiguration/controller.go create mode 100644 
internal/mesh/internal/controllers/proxyconfiguration/controller_test.go create mode 100644 internal/mesh/internal/controllers/proxyconfiguration/sort.go create mode 100644 internal/mesh/internal/controllers/proxyconfiguration/sort_test.go create mode 100644 internal/mesh/internal/controllers/register.go create mode 100644 internal/mesh/internal/controllers/routes/controller.go create mode 100644 internal/mesh/internal/controllers/routes/controller_test.go create mode 100644 internal/mesh/internal/controllers/routes/destination_policy_validation.go create mode 100644 internal/mesh/internal/controllers/routes/destination_policy_validation_test.go create mode 100644 internal/mesh/internal/controllers/routes/generate.go create mode 100644 internal/mesh/internal/controllers/routes/generate_test.go create mode 100644 internal/mesh/internal/controllers/routes/intermediate.go create mode 100644 internal/mesh/internal/controllers/routes/loader/loader.go create mode 100644 internal/mesh/internal/controllers/routes/loader/loader_test.go create mode 100644 internal/mesh/internal/controllers/routes/loader/memoized.go create mode 100644 internal/mesh/internal/controllers/routes/loader/related.go create mode 100644 internal/mesh/internal/controllers/routes/pending_status.go create mode 100644 internal/mesh/internal/controllers/routes/ref_validation.go create mode 100644 internal/mesh/internal/controllers/routes/ref_validation_test.go create mode 100644 internal/mesh/internal/controllers/routes/routestest/routestest.go create mode 100644 internal/mesh/internal/controllers/routes/sort_rules.go create mode 100644 internal/mesh/internal/controllers/routes/sort_rules_test.go create mode 100644 internal/mesh/internal/controllers/routes/status.go create mode 100644 internal/mesh/internal/controllers/routes/util.go create mode 100644 internal/mesh/internal/controllers/routes/xroutemapper/.mockery.yaml create mode 100644 internal/mesh/internal/controllers/routes/xroutemapper/util.go 
create mode 100644 internal/mesh/internal/controllers/routes/xroutemapper/xroutemapper.go create mode 100644 internal/mesh/internal/controllers/routes/xroutemapper/xroutemapper_test.go create mode 100644 internal/mesh/internal/controllers/routes/xroutemapper/xroutemappermock/mock_ResolveFailoverServiceDestinations.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/builder.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/builder_test.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/destination_multiport_test.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/destinations.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/destinations_test.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/expose_paths.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/expose_paths_test.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/local_app.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/local_app_multiport_test.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/local_app_test.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/naming.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/routes.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-foo-bar.golden create mode 100644 
internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multi-destination-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multi-destination-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multi-destination-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multi-destination-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multiple-implicit-destinations-tproxy-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multiple-implicit-destinations-tproxy-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multiple-implicit-destinations-tproxy-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-multiple-implicit-destinations-tproxy-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-ip-port-bind-address-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-ip-port-bind-address-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-ip-port-bind-address-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-ip-port-bind-address-foo-default.golden create mode 100644 
internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-unix-socket-bind-address-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-unix-socket-bind-address-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-unix-socket-bind-address-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-destination-unix-socket-bind-address-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-implicit-destination-tproxy-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-implicit-destination-tproxy-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-implicit-destination-tproxy-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/l4-single-implicit-destination-tproxy-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/mixed-multi-destination-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/mixed-multi-destination-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/mixed-multi-destination-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/mixed-multi-destination-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-bar.golden create mode 100644 
internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-default.golden create mode 100644 
internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/l7-expose-paths-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/l7-expose-paths-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/l7-expose-paths-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/l7-expose-paths-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/l7-expose-paths.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/local-and-inbound-connections.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 
internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-with-specific-ports.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-without-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiple-workload-addresses-without-ports.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 
internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-multiple-workload-addresses-without-ports.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-single-workload-address-without-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-single-workload-address-without-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-single-workload-address-without-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-single-workload-address-without-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-single-workload-address-without-ports.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-workload-with-only-mesh-port-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-workload-with-only-mesh-port-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-workload-with-only-mesh-port-foo-bar.golden create mode 100644 
internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-workload-with-only-mesh-port-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l4-workload-with-only-mesh-port.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-multiple-workload-addresses-without-ports.golden create mode 100644 
internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-single-workload-address-without-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-single-workload-address-without-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-single-workload-address-without-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-single-workload-address-without-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/multiport-l7-single-workload-address-without-ports.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/single-workload-address-without-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/single-workload-address-without-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/single-workload-address-without-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/single-workload-address-without-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/builder/testdata/source/single-workload-address-without-ports.golden create mode 100644 internal/mesh/internal/controllers/sidecarproxy/controller.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/controller_test.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/data_fetcher.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/data_fetcher_test.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/helper_test.go create mode 100644 internal/mesh/internal/controllers/sidecarproxy/mapper.go 
create mode 100644 internal/mesh/internal/controllers/xds/controller.go create mode 100644 internal/mesh/internal/controllers/xds/controller_test.go create mode 100644 internal/mesh/internal/controllers/xds/endpoint_builder.go create mode 100644 internal/mesh/internal/controllers/xds/endpoint_builder_test.go create mode 100644 internal/mesh/internal/controllers/xds/leaf_cancels.go create mode 100644 internal/mesh/internal/controllers/xds/leaf_mapper.go create mode 100644 internal/mesh/internal/controllers/xds/mock_updater.go create mode 100644 internal/mesh/internal/controllers/xds/proxy_tracker_watch.go create mode 100644 internal/mesh/internal/controllers/xds/reconciliation_data.go create mode 100644 internal/mesh/internal/controllers/xds/status/status.go create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-implicit-and-explicit-destinations-tproxy-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multi-destination-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multi-destination-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multi-destination-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multi-destination-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multiple-implicit-destinations-tproxy-default-bar.golden create mode 100644 
internal/mesh/internal/controllers/xds/testdata/destination/l4-multiple-implicit-destinations-tproxy-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multiple-implicit-destinations-tproxy-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-multiple-implicit-destinations-tproxy-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-ip-port-bind-address-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-ip-port-bind-address-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-ip-port-bind-address-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-ip-port-bind-address-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-unix-socket-bind-address-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-unix-socket-bind-address-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-unix-socket-bind-address-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-destination-unix-socket-bind-address-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-implicit-destination-tproxy-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-implicit-destination-tproxy-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/l4-single-implicit-destination-tproxy-foo-bar.golden create mode 100644 
internal/mesh/internal/controllers/xds/testdata/destination/l4-single-implicit-destination-tproxy-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/mixed-multi-destination-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/mixed-multi-destination-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/mixed-multi-destination-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/mixed-multi-destination-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-multiple-implicit-destinations-tproxy-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-tproxy-foo-default.golden create mode 100644 
internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/destination/multiport-l4-and-l7-single-implicit-destination-with-multiple-workloads-tproxy-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/l7-expose-paths-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/l7-expose-paths-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/l7-expose-paths-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/l7-expose-paths-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/l7-expose-paths.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/local-and-inbound-connections-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/local-and-inbound-connections-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/local-and-inbound-connections-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/local-and-inbound-connections-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/local-and-inbound-connections.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 
internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-with-specific-ports.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-without-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiple-workload-addresses-without-ports.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-with-specific-ports.golden create mode 100644 
internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-multiple-workload-addresses-without-ports.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-single-workload-address-without-ports.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-workload-with-only-mesh-port-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-workload-with-only-mesh-port-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-workload-with-only-mesh-port-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-workload-with-only-mesh-port-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l4-workload-with-only-mesh-port.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-bar.golden create mode 100644 
internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-with-specific-ports.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-without-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-multiple-workload-addresses-without-ports.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/multiport-l7-single-workload-address-without-ports.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/single-workload-address-without-ports-default-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/single-workload-address-without-ports-default-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/single-workload-address-without-ports-foo-bar.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/single-workload-address-without-ports-foo-default.golden create mode 100644 internal/mesh/internal/controllers/xds/testdata/source/single-workload-address-without-ports.golden create mode 100644 internal/mesh/internal/mappers/common/workload_selector_util.go create mode 100644 internal/mesh/internal/mappers/common/workload_selector_util_test.go create mode 100644 
internal/mesh/internal/mappers/workloadselectionmapper/workload_selection_mapper.go create mode 100644 internal/mesh/internal/mappers/workloadselectionmapper/workload_selection_mapper_test.go create mode 100644 internal/mesh/internal/meshindexes/computed_routes.go create mode 100644 internal/mesh/internal/meshindexes/computed_routes_test.go create mode 100644 internal/mesh/internal/types/api_gateway.go create mode 100644 internal/mesh/internal/types/computed_explicit_destinations.go create mode 100644 internal/mesh/internal/types/computed_implicit_destinations.go create mode 100644 internal/mesh/internal/types/computed_implicit_destinations_test.go create mode 100644 internal/mesh/internal/types/computed_proxy_configuration.go create mode 100644 internal/mesh/internal/types/computed_routes.go create mode 100644 internal/mesh/internal/types/computed_routes_test.go create mode 100644 internal/mesh/internal/types/decoded.go create mode 100644 internal/mesh/internal/types/destination_policy.go create mode 100644 internal/mesh/internal/types/destination_policy_test.go create mode 100644 internal/mesh/internal/types/destinations.go create mode 100644 internal/mesh/internal/types/destinations_configuration.go create mode 100644 internal/mesh/internal/types/destinations_configuration_test.go create mode 100644 internal/mesh/internal/types/destinations_test.go create mode 100644 internal/mesh/internal/types/errors.go create mode 100644 internal/mesh/internal/types/grpc_route.go create mode 100644 internal/mesh/internal/types/grpc_route_test.go create mode 100644 internal/mesh/internal/types/http_route.go create mode 100644 internal/mesh/internal/types/http_route_test.go create mode 100644 internal/mesh/internal/types/intermediate/types.go create mode 100644 internal/mesh/internal/types/mesh_configuration.go create mode 100644 internal/mesh/internal/types/mesh_gateway.go create mode 100644 internal/mesh/internal/types/mesh_gateway_test.go create mode 100644 
internal/mesh/internal/types/proxy_configuration.go create mode 100644 internal/mesh/internal/types/proxy_configuration_test.go create mode 100644 internal/mesh/internal/types/proxy_state_template.go create mode 100644 internal/mesh/internal/types/proxy_state_template_test.go create mode 100644 internal/mesh/internal/types/tcp_route.go create mode 100644 internal/mesh/internal/types/tcp_route_test.go create mode 100644 internal/mesh/internal/types/types.go create mode 100644 internal/mesh/internal/types/types_test.go create mode 100644 internal/mesh/internal/types/util.go create mode 100644 internal/mesh/internal/types/xroute.go create mode 100644 internal/mesh/internal/types/xroute_test.go create mode 100644 internal/mesh/proxy-snapshot/proxy_snapshot.go create mode 100644 internal/mesh/proxy-tracker/mock_SessionLimiter.go create mode 100644 internal/mesh/proxy-tracker/proxy_state_exports.go create mode 100644 internal/mesh/proxy-tracker/proxy_state_exports_test.go create mode 100644 internal/mesh/proxy-tracker/proxy_tracker.go create mode 100644 internal/mesh/proxy-tracker/proxy_tracker_test.go create mode 100644 internal/multicluster/internal/controllers/exportedservices/builder.go create mode 100644 internal/multicluster/internal/controllers/exportedservices/controller.go create mode 100644 internal/multicluster/internal/controllers/exportedservices/controller_test.go create mode 100644 internal/multicluster/internal/controllers/exportedservices/expander/expander_ce.go create mode 100644 internal/multicluster/internal/controllers/exportedservices/expander/expander_ce/expander.go create mode 100644 internal/multicluster/internal/controllers/exportedservices/expander/expander_ce/expander_test.go create mode 100644 internal/multicluster/internal/controllers/exportedservices/expander/types/types.go create mode 100644 internal/multicluster/internal/controllers/exportedservices/helpers_ce.go create mode 100644 
internal/multicluster/internal/controllers/exportedservices/status.go create mode 100644 internal/multicluster/internal/types/types_ce.go create mode 100644 internal/resource/mappers/selectiontracker/selection_tracker.go create mode 100644 internal/resource/mappers/selectiontracker/selection_tracker_test.go create mode 100644 internal/resourcehcl/testdata/destinations.golden create mode 100644 internal/resourcehcl/testdata/destinations.hcl create mode 100644 internal/resourcehcl/testdata/no-blocks.golden create mode 100644 internal/resourcehcl/testdata/no-blocks.hcl create mode 100644 internal/tenancy/exports.go create mode 100644 internal/tenancy/internal/bridge/tenancy_bridge.go create mode 100644 internal/tenancy/internal/bridge/tenancy_bridge_ce.go create mode 100644 internal/tenancy/internal/controllers/common/common.go create mode 100644 internal/tenancy/internal/controllers/namespace/controller.go create mode 100644 internal/tenancy/internal/controllers/register.go create mode 100644 internal/tenancy/internal/controllers/register_ce.go create mode 100644 internal/tenancy/internal/types/errors.go create mode 100644 internal/tenancy/internal/types/namespace.go create mode 100644 internal/tenancy/internal/types/types.go create mode 100644 internal/tenancy/internal/types/types_ce.go create mode 100644 internal/tenancy/internal/types/types_test.go create mode 100644 internal/tenancy/tenancytest/namespace_controller_test.go create mode 100644 internal/tenancy/tenancytest/namespace_test.go create mode 100644 proto-public/pbauth/v2beta1/computed_traffic_permissions.pb.binary.go create mode 100644 proto-public/pbauth/v2beta1/computed_traffic_permissions.pb.go create mode 100644 proto-public/pbauth/v2beta1/computed_traffic_permissions.proto create mode 100644 proto-public/pbauth/v2beta1/computed_traffic_permissions_deepcopy.gen.go create mode 100644 proto-public/pbauth/v2beta1/computed_traffic_permissions_json.gen.go create mode 100644 
proto-public/pbauth/v2beta1/resources.rtypes.go create mode 100644 proto-public/pbauth/v2beta1/traffic_permission_extras_test.go create mode 100644 proto-public/pbauth/v2beta1/traffic_permissions.pb.binary.go create mode 100644 proto-public/pbauth/v2beta1/traffic_permissions.pb.go create mode 100644 proto-public/pbauth/v2beta1/traffic_permissions.proto create mode 100644 proto-public/pbauth/v2beta1/traffic_permissions_addon.go create mode 100644 proto-public/pbauth/v2beta1/traffic_permissions_deepcopy.gen.go create mode 100644 proto-public/pbauth/v2beta1/traffic_permissions_extras.go create mode 100644 proto-public/pbauth/v2beta1/traffic_permissions_json.gen.go create mode 100644 proto-public/pbauth/v2beta1/workload_identity.pb.binary.go create mode 100644 proto-public/pbauth/v2beta1/workload_identity.pb.go create mode 100644 proto-public/pbauth/v2beta1/workload_identity.proto create mode 100644 proto-public/pbauth/v2beta1/workload_identity_deepcopy.gen.go create mode 100644 proto-public/pbauth/v2beta1/workload_identity_json.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy.pb.binary.go create mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy.pb.go create mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy.proto create mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy_deepcopy.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy_extras.go create mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy_extras_test.go create mode 100644 proto-public/pbcatalog/v2beta1/computed_failover_policy_json.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/failover_policy.pb.binary.go create mode 100644 proto-public/pbcatalog/v2beta1/failover_policy.pb.go create mode 100644 proto-public/pbcatalog/v2beta1/failover_policy.proto create mode 100644 proto-public/pbcatalog/v2beta1/failover_policy_deepcopy.gen.go create mode 100644 
proto-public/pbcatalog/v2beta1/failover_policy_extras.go create mode 100644 proto-public/pbcatalog/v2beta1/failover_policy_extras_test.go create mode 100644 proto-public/pbcatalog/v2beta1/failover_policy_json.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/health.pb.binary.go create mode 100644 proto-public/pbcatalog/v2beta1/health.pb.go create mode 100644 proto-public/pbcatalog/v2beta1/health.proto create mode 100644 proto-public/pbcatalog/v2beta1/health_deepcopy.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/health_json.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/node.pb.binary.go create mode 100644 proto-public/pbcatalog/v2beta1/node.pb.go create mode 100644 proto-public/pbcatalog/v2beta1/node.proto create mode 100644 proto-public/pbcatalog/v2beta1/node_deepcopy.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/node_json.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/protocol.pb.go create mode 100644 proto-public/pbcatalog/v2beta1/protocol.proto create mode 100644 proto-public/pbcatalog/v2beta1/resources.rtypes.go create mode 100644 proto-public/pbcatalog/v2beta1/selector.pb.binary.go create mode 100644 proto-public/pbcatalog/v2beta1/selector.pb.go create mode 100644 proto-public/pbcatalog/v2beta1/selector.proto create mode 100644 proto-public/pbcatalog/v2beta1/selector_deepcopy.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/selector_json.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/service.pb.binary.go create mode 100644 proto-public/pbcatalog/v2beta1/service.pb.go create mode 100644 proto-public/pbcatalog/v2beta1/service.proto create mode 100644 proto-public/pbcatalog/v2beta1/service_addon.go create mode 100644 proto-public/pbcatalog/v2beta1/service_addon_test.go create mode 100644 proto-public/pbcatalog/v2beta1/service_deepcopy.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/service_endpoints.pb.binary.go create mode 100644 proto-public/pbcatalog/v2beta1/service_endpoints.pb.go 
create mode 100644 proto-public/pbcatalog/v2beta1/service_endpoints.proto create mode 100644 proto-public/pbcatalog/v2beta1/service_endpoints_addon.go create mode 100644 proto-public/pbcatalog/v2beta1/service_endpoints_addon_test.go create mode 100644 proto-public/pbcatalog/v2beta1/service_endpoints_deepcopy.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/service_endpoints_json.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/service_json.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/vip.pb.binary.go create mode 100644 proto-public/pbcatalog/v2beta1/vip.pb.go create mode 100644 proto-public/pbcatalog/v2beta1/vip.proto create mode 100644 proto-public/pbcatalog/v2beta1/vip_deepcopy.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/vip_json.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/workload.pb.binary.go create mode 100644 proto-public/pbcatalog/v2beta1/workload.pb.go create mode 100644 proto-public/pbcatalog/v2beta1/workload.proto create mode 100644 proto-public/pbcatalog/v2beta1/workload_addon.go create mode 100644 proto-public/pbcatalog/v2beta1/workload_addon_test.go create mode 100644 proto-public/pbcatalog/v2beta1/workload_deepcopy.gen.go create mode 100644 proto-public/pbcatalog/v2beta1/workload_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/api_gateway.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/api_gateway.pb.go create mode 100644 proto-public/pbmesh/v2beta1/api_gateway.proto create mode 100644 proto-public/pbmesh/v2beta1/api_gateway_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/api_gateway_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/common.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/common.pb.go create mode 100644 proto-public/pbmesh/v2beta1/common.proto create mode 100644 proto-public/pbmesh/v2beta1/common_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/common_json.gen.go create mode 100644 
proto-public/pbmesh/v2beta1/computed_explicit_destinations.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/computed_explicit_destinations.pb.go create mode 100644 proto-public/pbmesh/v2beta1/computed_explicit_destinations.proto create mode 100644 proto-public/pbmesh/v2beta1/computed_explicit_destinations_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/computed_explicit_destinations_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/computed_gateway_routes.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/computed_gateway_routes.pb.go create mode 100644 proto-public/pbmesh/v2beta1/computed_gateway_routes.proto create mode 100644 proto-public/pbmesh/v2beta1/computed_gateway_routes_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/computed_gateway_routes_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/computed_implicit_destinations.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/computed_implicit_destinations.pb.go create mode 100644 proto-public/pbmesh/v2beta1/computed_implicit_destinations.proto create mode 100644 proto-public/pbmesh/v2beta1/computed_implicit_destinations_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/computed_implicit_destinations_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/computed_proxy_configuration.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/computed_proxy_configuration.pb.go create mode 100644 proto-public/pbmesh/v2beta1/computed_proxy_configuration.proto create mode 100644 proto-public/pbmesh/v2beta1/computed_proxy_configuration_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/computed_proxy_configuration_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/computed_routes.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/computed_routes.pb.go create mode 100644 proto-public/pbmesh/v2beta1/computed_routes.proto create mode 100644 
proto-public/pbmesh/v2beta1/computed_routes_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/computed_routes_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/connection.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/connection.pb.go create mode 100644 proto-public/pbmesh/v2beta1/connection.proto create mode 100644 proto-public/pbmesh/v2beta1/connection_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/connection_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/destination_policy.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/destination_policy.pb.go create mode 100644 proto-public/pbmesh/v2beta1/destination_policy.proto create mode 100644 proto-public/pbmesh/v2beta1/destination_policy_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/destination_policy_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/destinations.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/destinations.pb.go create mode 100644 proto-public/pbmesh/v2beta1/destinations.proto create mode 100644 proto-public/pbmesh/v2beta1/destinations_configuration.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/destinations_configuration.pb.go create mode 100644 proto-public/pbmesh/v2beta1/destinations_configuration.proto create mode 100644 proto-public/pbmesh/v2beta1/destinations_configuration_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/destinations_configuration_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/destinations_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/destinations_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/expose.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/expose.pb.go create mode 100644 proto-public/pbmesh/v2beta1/expose.proto create mode 100644 proto-public/pbmesh/v2beta1/expose_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/expose_json.gen.go create mode 100644 
proto-public/pbmesh/v2beta1/grpc_route.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/grpc_route.pb.go create mode 100644 proto-public/pbmesh/v2beta1/grpc_route.proto create mode 100644 proto-public/pbmesh/v2beta1/grpc_route_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/grpc_route_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/http_route.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/http_route.pb.go create mode 100644 proto-public/pbmesh/v2beta1/http_route.proto create mode 100644 proto-public/pbmesh/v2beta1/http_route_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/http_route_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/http_route_retries.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/http_route_retries.pb.go create mode 100644 proto-public/pbmesh/v2beta1/http_route_retries.proto create mode 100644 proto-public/pbmesh/v2beta1/http_route_retries_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/http_route_retries_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/http_route_timeouts.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/http_route_timeouts.pb.go create mode 100644 proto-public/pbmesh/v2beta1/http_route_timeouts.proto create mode 100644 proto-public/pbmesh/v2beta1/http_route_timeouts_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/http_route_timeouts_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/mesh_configuration.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/mesh_configuration.pb.go create mode 100644 proto-public/pbmesh/v2beta1/mesh_configuration.proto create mode 100644 proto-public/pbmesh/v2beta1/mesh_configuration_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/mesh_configuration_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/mesh_gateway.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/mesh_gateway.pb.go create mode 100644 
proto-public/pbmesh/v2beta1/mesh_gateway.proto create mode 100644 proto-public/pbmesh/v2beta1/mesh_gateway_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/mesh_gateway_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/access_logs.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/access_logs.pb.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/access_logs.proto create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/access_logs_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/access_logs_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/address.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/address.pb.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/address.proto create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/address_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/address_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/cluster.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/cluster.pb.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/cluster.proto create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/cluster_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/cluster_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/endpoints.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/endpoints.pb.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/endpoints.proto create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/endpoints_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/endpoints_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches.pb.go create mode 100644 
proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches.proto create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/escape_hatches_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/header_mutations.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/header_mutations.pb.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/header_mutations.proto create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/header_mutations_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/header_mutations_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/intentions.pb.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/listener.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/listener.pb.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/listener.proto create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/listener_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/listener_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/protocol.pb.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/protocol.proto create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/protocol_test.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/references.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/references.pb.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/references.proto create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/references_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/references_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/route.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/route.pb.go create mode 100644 
proto-public/pbmesh/v2beta1/pbproxystate/route.proto create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/route_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/route_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions.pb.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions.proto create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/traffic_permissions_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/transport_socket.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/transport_socket.pb.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/transport_socket.proto create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/transport_socket_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/pbproxystate/transport_socket_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/proxy_configuration.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/proxy_configuration.pb.go create mode 100644 proto-public/pbmesh/v2beta1/proxy_configuration.proto create mode 100644 proto-public/pbmesh/v2beta1/proxy_configuration_addon.go create mode 100644 proto-public/pbmesh/v2beta1/proxy_configuration_addon_test.go create mode 100644 proto-public/pbmesh/v2beta1/proxy_configuration_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/proxy_configuration_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/proxy_state.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/proxy_state.pb.go create mode 100644 proto-public/pbmesh/v2beta1/proxy_state.proto create mode 100644 proto-public/pbmesh/v2beta1/proxy_state_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/proxy_state_json.gen.go 
create mode 100644 proto-public/pbmesh/v2beta1/resources.rtypes.go create mode 100644 proto-public/pbmesh/v2beta1/routing.pb.go create mode 100644 proto-public/pbmesh/v2beta1/routing.proto create mode 100644 proto-public/pbmesh/v2beta1/tcp_route.pb.binary.go create mode 100644 proto-public/pbmesh/v2beta1/tcp_route.pb.go create mode 100644 proto-public/pbmesh/v2beta1/tcp_route.proto create mode 100644 proto-public/pbmesh/v2beta1/tcp_route_deepcopy.gen.go create mode 100644 proto-public/pbmesh/v2beta1/tcp_route_json.gen.go create mode 100644 proto-public/pbmesh/v2beta1/xroute_addons.go create mode 100644 proto-public/pbmesh/v2beta1/xroute_addons_test.go create mode 100644 proto-public/pbmulticluster/v2beta1/resources.rtypes.go create mode 100644 proto-public/pbmulticluster/v2beta1/sameness_group.pb.binary.go create mode 100644 proto-public/pbmulticluster/v2beta1/sameness_group.pb.go create mode 100644 proto-public/pbmulticluster/v2beta1/sameness_group.proto create mode 100644 proto-public/pbmulticluster/v2beta1/sameness_group_deepcopy.gen.go create mode 100644 proto-public/pbmulticluster/v2beta1/sameness_group_json.gen.go create mode 100644 proto-public/pbtenancy/v2beta1/namespace.pb.binary.go create mode 100644 proto-public/pbtenancy/v2beta1/namespace.pb.go create mode 100644 proto-public/pbtenancy/v2beta1/namespace.proto create mode 100644 proto-public/pbtenancy/v2beta1/namespace_deepcopy.gen.go create mode 100644 proto-public/pbtenancy/v2beta1/namespace_json.gen.go create mode 100644 proto-public/pbtenancy/v2beta1/partition.pb.binary.go create mode 100644 proto-public/pbtenancy/v2beta1/partition.pb.go create mode 100644 proto-public/pbtenancy/v2beta1/partition.proto create mode 100644 proto-public/pbtenancy/v2beta1/partition_deepcopy.gen.go create mode 100644 proto-public/pbtenancy/v2beta1/partition_json.gen.go create mode 100644 proto-public/pbtenancy/v2beta1/resources.rtypes.go delete mode 100644 website/public/img/architecture/cluster-peering-diagram.png delete 
mode 100644 website/public/img/architecture/consul-singleDC-redundancyzones.png diff --git a/.changelog/21592.txt b/.changelog/21592.txt deleted file mode 100644 index a8a69f0d9b94a..0000000000000 --- a/.changelog/21592.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:feature -server: remove v2 tenancy, catalog, and mesh experiments -``` diff --git a/.changelog/21616.txt b/.changelog/21616.txt deleted file mode 100644 index f26b47c711d81..0000000000000 --- a/.changelog/21616.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note: improvement -connect: Add Envoy 1.31 and 1.30 to support matrix -``` diff --git a/.changelog/21735.txt b/.changelog/21735.txt deleted file mode 100644 index 223b84b4803cd..0000000000000 --- a/.changelog/21735.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:security -ui: Pin a newer resolution of ansi-html -``` diff --git a/.github/workflows/backport-assistant.yml b/.github/workflows/backport-assistant.yml index d004f220bf8e8..24c0a66da59dd 100644 --- a/.github/workflows/backport-assistant.yml +++ b/.github/workflows/backport-assistant.yml @@ -19,7 +19,7 @@ jobs: backport: if: github.event.pull_request.merged runs-on: ubuntu-latest - container: hashicorpdev/backport-assistant:0.4.4 + container: hashicorpdev/backport-assistant:0.4.1 steps: - name: Run Backport Assistant for release branches run: | diff --git a/.github/workflows/ce-merge-trigger.yml b/.github/workflows/ce-merge-trigger.yml index 9e088e1fd05a0..30a6b5fd90dff 100644 --- a/.github/workflows/ce-merge-trigger.yml +++ b/.github/workflows/ce-merge-trigger.yml @@ -9,11 +9,6 @@ on: branches: - main - release/** - - '!release/1.18**' - - '!release/1.17**' - - '!release/1.16**' - - '!release/1.15**' - jobs: trigger-ce-merge: diff --git a/.github/workflows/nightly-test-1.19.x.yaml b/.github/workflows/nightly-test-1.14.x.yaml similarity index 96% rename from .github/workflows/nightly-test-1.19.x.yaml rename to .github/workflows/nightly-test-1.14.x.yaml index 2f5398596b2f5..8e85e175c27f4 
100644 --- a/.github/workflows/nightly-test-1.19.x.yaml +++ b/.github/workflows/nightly-test-1.14.x.yaml @@ -1,7 +1,7 @@ # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: MPL-2.0 -name: Nightly Frontend Test 1.19.x +name: Nightly Frontend Test 1.14.x on: schedule: - cron: '0 4 * * *' @@ -9,8 +9,8 @@ on: env: EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition - BRANCH: "release/1.19.x" - BRANCH_NAME: "release-1.19.x" # Used for naming artifacts + BRANCH: "release/1.14.x" + BRANCH_NAME: "release-1.14.x" # Used for naming artifacts GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: @@ -24,7 +24,7 @@ jobs: # Not necessary to use yarn, but enables caching - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -56,7 +56,7 @@ jobs: # Not necessary to use yarn, but enables caching - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -95,7 +95,7 @@ jobs: # Not necessary to use yarn, but enables caching - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -128,7 +128,7 @@ jobs: # Not necessary to use yarn, but enables caching - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -167,7 +167,7 @@ jobs: # Not necessary to use yarn, but enables caching - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -198,7 +198,7 @@ jobs: # Not necessary to use yarn, but enables caching - uses: 
actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock diff --git a/.github/workflows/nightly-test-1.15.x.yaml b/.github/workflows/nightly-test-1.15.x.yaml index f8dec0f82e340..c25e25ac57b1d 100644 --- a/.github/workflows/nightly-test-1.15.x.yaml +++ b/.github/workflows/nightly-test-1.15.x.yaml @@ -14,15 +14,8 @@ env: GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: - check-ent: - runs-on: ubuntu-latest - if: ${{ endsWith(github.repository, '-enterprise') }} - steps: - - run: echo "Building Enterprise" - frontend-test-workspace-node: runs-on: ubuntu-latest - needs: [check-ent] steps: - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: @@ -52,7 +45,6 @@ jobs: frontend-build-ce: runs-on: ubuntu-latest - needs: [check-ent] env: JOBS: 2 CONSUL_NSPACES_ENABLED: 0 @@ -125,7 +117,6 @@ jobs: frontend-build-ent: runs-on: ubuntu-latest - needs: [check-ent] env: JOBS: 2 CONSUL_NSPACES_ENABLED: 1 diff --git a/.github/workflows/nightly-test-1.18.x.yaml b/.github/workflows/nightly-test-1.16.x.yaml similarity index 93% rename from .github/workflows/nightly-test-1.18.x.yaml rename to .github/workflows/nightly-test-1.16.x.yaml index ca627b013932f..6dff72150fa6e 100644 --- a/.github/workflows/nightly-test-1.18.x.yaml +++ b/.github/workflows/nightly-test-1.16.x.yaml @@ -1,7 +1,7 @@ # Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: MPL-2.0 -name: Nightly Frontend Test 1.18.x +name: Nightly Frontend Test 1.16.x on: schedule: - cron: '0 4 * * *' @@ -9,20 +9,13 @@ on: env: EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition - BRANCH: "release/1.18.x" - BRANCH_NAME: "release-1.18.x" # Used for naming artifacts + BRANCH: "release/1.16.x" + BRANCH_NAME: "release-1.16.x" # Used for naming artifacts GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: - check-ent: - runs-on: ubuntu-latest - if: ${{ endsWith(github.repository, '-enterprise') }} - steps: - - run: echo "Building Enterprise" - frontend-test-workspace-node: runs-on: ubuntu-latest - needs: [check-ent] steps: - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: @@ -31,7 +24,7 @@ jobs: # Not necessary to use yarn, but enables caching - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -52,7 +45,6 @@ jobs: frontend-build-ce: runs-on: ubuntu-latest - needs: [check-ent] env: JOBS: 2 CONSUL_NSPACES_ENABLED: 0 @@ -64,7 +56,7 @@ jobs: # Not necessary to use yarn, but enables caching - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -103,7 +95,7 @@ jobs: # Not necessary to use yarn, but enables caching - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -125,7 +117,6 @@ jobs: frontend-build-ent: runs-on: ubuntu-latest - needs: [check-ent] env: JOBS: 2 CONSUL_NSPACES_ENABLED: 1 @@ -137,7 +128,7 @@ jobs: # Not necessary to use yarn, but enables caching - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: - node-version: 18 + node-version: 14 cache: 'yarn' 
cache-dependency-path: ./ui/yarn.lock @@ -176,7 +167,7 @@ jobs: # Not necessary to use yarn, but enables caching - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock @@ -207,7 +198,7 @@ jobs: # Not necessary to use yarn, but enables caching - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 with: - node-version: 18 + node-version: 14 cache: 'yarn' cache-dependency-path: ./ui/yarn.lock diff --git a/.github/workflows/nightly-test-1.17.x.yaml b/.github/workflows/nightly-test-1.17.x.yaml index 10eb3d9e01731..ad6e49684d9e0 100644 --- a/.github/workflows/nightly-test-1.17.x.yaml +++ b/.github/workflows/nightly-test-1.17.x.yaml @@ -14,15 +14,8 @@ env: GOPRIVATE: github.com/hashicorp # Required for enterprise deps jobs: - check-ent: - runs-on: ubuntu-latest - if: ${{ endsWith(github.repository, '-enterprise') }} - steps: - - run: echo "Building Enterprise" - frontend-test-workspace-node: runs-on: ubuntu-latest - needs: [check-ent] steps: - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: @@ -52,7 +45,6 @@ jobs: frontend-build-ce: runs-on: ubuntu-latest - needs: [check-ent] env: JOBS: 2 CONSUL_NSPACES_ENABLED: 0 @@ -125,7 +117,6 @@ jobs: frontend-build-ent: runs-on: ubuntu-latest - needs: [check-ent] env: JOBS: 2 CONSUL_NSPACES_ENABLED: 1 diff --git a/.github/workflows/nightly-test-integ-peering_commontopo.yml b/.github/workflows/nightly-test-integ-peering_commontopo.yml index 5c7f5fa23babc..84b8a97c1cc63 100644 --- a/.github/workflows/nightly-test-integ-peering_commontopo.yml +++ b/.github/workflows/nightly-test-integ-peering_commontopo.yml @@ -6,7 +6,7 @@ name: Nightly test integrations - peering_common_topo on: schedule: # Run nightly at 12AM UTC/8PM EST/5PM PST - - cron: '0 0 * * *' + - cron: '* 0 * * *' workflow_dispatch: {} env: @@ -39,20 +39,12 @@ jobs: get-go-version: uses: 
./.github/workflows/reusable-get-go-version.yml - with: - ref: ${{ inputs.branch }} - - get-envoy-versions: - uses: ./.github/workflows/reusable-get-envoy-versions.yml - with: - ref: ${{ inputs.branch }} tests: runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl ) }} needs: - setup - get-go-version - - get-envoy-versions permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. contents: read @@ -70,7 +62,7 @@ jobs: name: '${{matrix.test-case}}' env: - ENVOY_VERSION: ${{ needs.get-envoy-versions.outputs.max-envoy-version }} + ENVOY_VERSION: "1.29.5" steps: - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. diff --git a/.github/workflows/nightly-test-integrations-1.15.x.yml b/.github/workflows/nightly-test-integrations-1.15.x.yml index abb24520efde2..c40c63a5ad288 100644 --- a/.github/workflows/nightly-test-integrations-1.15.x.yml +++ b/.github/workflows/nightly-test-integrations-1.15.x.yml @@ -6,7 +6,7 @@ name: Nightly test-integrations 1.15.x on: schedule: # Run nightly at 1AM UTC/9PM EST/6PM PST - - cron: '0 1 * * *' + - cron: '* 1 * * *' workflow_dispatch: {} env: @@ -23,15 +23,8 @@ env: BRANCH_NAME: "release-1.15.x" # Used for naming artifacts jobs: - check-ent: - runs-on: ubuntu-latest - if: ${{ endsWith(github.repository, '-enterprise') }} - steps: - - run: echo "Building Enterprise" - setup: runs-on: ubuntu-latest - needs: [check-ent] name: Setup outputs: compute-small: ${{ steps.runners.outputs.compute-small }} @@ -48,16 +41,7 @@ jobs: run: .github/scripts/get_runner_classes.sh get-go-version: - needs: [check-ent] uses: ./.github/workflows/reusable-get-go-version.yml - with: - ref: release/1.15.x - - get-envoy-versions: - needs: [check-ent] - uses: ./.github/workflows/reusable-get-envoy-versions.yml - with: - ref: release/1.15.x dev-build: needs: @@ -87,34 +71,36 @@ jobs: - name: Generate 
Envoy Job Matrix id: set-matrix env: - # TEST_SPLITS sets the number of test case splits to use in the matrix. This will be - # further multiplied in envoy-integration tests by the other dimensions in the matrix - # to determine the total number of runners used. - TEST_SPLITS: 4 + # this is further going to multiplied in envoy-integration tests by the + # other dimensions in the matrix. Currently TOTAL_RUNNERS would be + # 14 based on these values: + # envoy-version: ["1.22.11", "1.23.12", "1.24.12", "1.25.11", "1.26.8", "1.27.5", "1.28.3"] + # xds-target: ["server", "client"] + TOTAL_RUNNERS: 7 JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]' run: | + NUM_RUNNERS=$TOTAL_RUNNERS NUM_DIRS=$(find ./test/integration/connect/envoy -mindepth 1 -maxdepth 1 -type d | wc -l) - if [ "$NUM_DIRS" -lt "$TEST_SPLITS" ]; then - echo "TEST_SPLITS is larger than the number of tests/packages to split." - TEST_SPLITS=$((NUM_DIRS-1)) + if [ "$NUM_DIRS" -lt "$NUM_RUNNERS" ]; then + echo "TOTAL_RUNNERS is larger than the number of tests/packages to split." + NUM_RUNNERS=$((NUM_DIRS-1)) fi - # fix issue where test splitting calculation generates 1 more split than TEST_SPLITS. - TEST_SPLITS=$((TEST_SPLITS-1)) + # fix issue where test splitting calculation generates 1 more split than TOTAL_RUNNERS. 
+ NUM_RUNNERS=$((NUM_RUNNERS-1)) { echo -n "envoy-matrix=" find ./test/integration/connect/envoy -maxdepth 1 -type d -print0 \ | xargs -0 -n 1 basename \ - | jq --raw-input --argjson runnercount "$TEST_SPLITS" "$JQ_SLICER" \ + | jq --raw-input --argjson runnercount "$NUM_RUNNERS" "$JQ_SLICER" \ | jq --compact-output 'map(join("|"))' } >> "$GITHUB_OUTPUT" - + envoy-integration-test: runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: - setup - get-go-version - - get-envoy-versions - generate-envoy-job-matrices - dev-build permissions: @@ -123,7 +109,7 @@ jobs: strategy: fail-fast: false matrix: - envoy-version: ${{ fromJSON(needs.get-envoy-versions.outputs.envoy-versions-json) }} + envoy-version: ["1.22.11", "1.23.12", "1.24.12", "1.25.11", "1.26.8", "1.27.6", "1.28.4"] xds-target: ["server", "client"] test-cases: ${{ fromJSON(needs.generate-envoy-job-matrices.outputs.envoy-matrix) }} env: @@ -146,15 +132,11 @@ jobs: path: ./bin - name: restore mode+x run: chmod +x ./bin/consul - - name: Set up Docker Buildx uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0 - - name: Docker build run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile ./bin - - name: Envoy Integration Tests - id: envoy-integration-tests env: GOTESTSUM_JUNITFILE: ${{ env.TEST_RESULTS_DIR }}/results.xml GOTESTSUM_FORMAT: standard-verbose @@ -175,23 +157,6 @@ jobs: --packages=./test/integration/connect/envoy \ -- -timeout=30m -tags integration -run="TestEnvoy/(${{ matrix.test-cases }})" - # See https://github.com/orgs/community/discussions/8945#discussioncomment-9897011 - # and overall topic discussion for why this is necessary. 
- - name: Generate artifact ID - id: generate-artifact-id - if: ${{ failure() && steps.envoy-integration-tests.conclusion == 'failure' }} - run: | - ARTIFACT_ID=$(uuidgen) - echo "Artifact ID: $ARTIFACT_ID (search this in job summary for download link)" - echo "artifact_id=$ARTIFACT_ID" >> "$GITHUB_ENV" - - - name: Upload failure logs - if: ${{ failure() && steps.envoy-integration-tests.conclusion == 'failure' }} - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 - with: - name: envoy-${{ matrix.envoy-version }}-logs-${{ env.artifact_id }} - path: test/integration/connect/envoy/workdir/logs/ - # NOTE: ENT specific step as we store secrets in Vault. - name: Authenticate to Vault if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} @@ -223,7 +188,7 @@ jobs: DATADOG_API_KEY: "${{ endsWith(github.repository, '-enterprise') && env.DATADOG_API_KEY || secrets.DATADOG_API_KEY }}" DD_ENV: ci run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml - + upgrade-integration-test: runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: @@ -239,17 +204,7 @@ jobs: consul-version: ["1.14", "1.15"] env: CONSUL_LATEST_VERSION: ${{ matrix.consul-version }} - # ENVOY_VERSION should be the latest version supported by _all_ Consul versions in the - # matrix.consul-version, since we are testing upgrade from an older Consul version. - # In practice, this should be the highest Envoy version supported by the lowest non-LTS - # Consul version in the matrix (LTS versions receive additional Envoy version support). - # - # This value should be kept current in new nightly test workflows, and updated any time - # a new major Envoy release is added to the set supported by Consul versions in - # matrix.consul-version (i.e. whenever the highest common Envoy version across active - # Consul versions changes). 
The minor Envoy version does not necessarily need to be - # kept current for the purpose of these tests, but the major (1.N) version should be. - ENVOY_VERSION: "1.24.12" + ENVOY_VERSION: "1.24.6" steps: - name: Checkout code uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 @@ -350,7 +305,7 @@ jobs: - envoy-integration-test - upgrade-integration-test runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} - if: ${{ always() && endsWith(github.repository, '-enterprise') }} + if: ${{ always() }} steps: - name: evaluate upstream job results run: | diff --git a/.github/workflows/nightly-test-integrations-1.19.x.yml b/.github/workflows/nightly-test-integrations-1.16.x.yml similarity index 67% rename from .github/workflows/nightly-test-integrations-1.19.x.yml rename to .github/workflows/nightly-test-integrations-1.16.x.yml index 327907d184d3f..9f7cee671343d 100644 --- a/.github/workflows/nightly-test-integrations-1.19.x.yml +++ b/.github/workflows/nightly-test-integrations-1.16.x.yml @@ -1,12 +1,12 @@ # Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: MPL-2.0 -name: Nightly test-integrations 1.19.x +name: Nightly test-integrations 1.16.x on: schedule: # Run nightly at 1AM UTC/9PM EST/6PM PST - - cron: '0 1 * * *' + - cron: '* 1 * * *' workflow_dispatch: {} env: @@ -19,8 +19,8 @@ env: # strip the hashicorp/ off the front of github.repository for consul CONSUL_LATEST_IMAGE_NAME: ${{ endsWith(github.repository, '-enterprise') && github.repository || 'hashicorp/consul' }} GOPRIVATE: github.com/hashicorp # Required for enterprise deps - BRANCH: "release/1.19.x" - BRANCH_NAME: "release-1.19.x" # Used for naming artifacts + BRANCH: "release/1.16.x" + BRANCH_NAME: "release-1.16.x" # Used for naming artifacts jobs: setup: @@ -42,13 +42,6 @@ jobs: get-go-version: uses: ./.github/workflows/reusable-get-go-version.yml - with: - ref: release/1.19.x - - get-envoy-versions: - uses: ./.github/workflows/reusable-get-envoy-versions.yml - with: - ref: release/1.19.x dev-build: needs: @@ -59,7 +52,7 @@ jobs: runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} uploaded-binary-name: 'consul-bin' - branch-name: "release/1.19.x" + branch-name: "release/1.16.x" go-version: ${{ needs.get-go-version.outputs.go-version }} secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} @@ -78,34 +71,36 @@ jobs: - name: Generate Envoy Job Matrix id: set-matrix env: - # TEST_SPLITS sets the number of test case splits to use in the matrix. This will be - # further multiplied in envoy-integration tests by the other dimensions in the matrix - # to determine the total number of runners used. - TEST_SPLITS: 4 + # this is further going to multiplied in envoy-integration tests by the + # other dimensions in the matrix. 
Currently TOTAL_RUNNERS would be + # multiplied by 8 based on these values: + # envoy-version: ["1.23.12", "1.24.12", "1.25.11", "1.26.8"] + # xds-target: ["server", "client"] + TOTAL_RUNNERS: 8 JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]' run: | + NUM_RUNNERS=$TOTAL_RUNNERS NUM_DIRS=$(find ./test/integration/connect/envoy -mindepth 1 -maxdepth 1 -type d | wc -l) - if [ "$NUM_DIRS" -lt "$TEST_SPLITS" ]; then - echo "TEST_SPLITS is larger than the number of tests/packages to split." - TEST_SPLITS=$((NUM_DIRS-1)) + if [ "$NUM_DIRS" -lt "$NUM_RUNNERS" ]; then + echo "TOTAL_RUNNERS is larger than the number of tests/packages to split." + NUM_RUNNERS=$((NUM_DIRS-1)) fi - # fix issue where test splitting calculation generates 1 more split than TEST_SPLITS. - TEST_SPLITS=$((TEST_SPLITS-1)) + # fix issue where test splitting calculation generates 1 more split than TOTAL_RUNNERS. + NUM_RUNNERS=$((NUM_RUNNERS-1)) { echo -n "envoy-matrix=" find ./test/integration/connect/envoy -maxdepth 1 -type d -print0 \ | xargs -0 -n 1 basename \ - | jq --raw-input --argjson runnercount "$TEST_SPLITS" "$JQ_SLICER" \ + | jq --raw-input --argjson runnercount "$NUM_RUNNERS" "$JQ_SLICER" \ | jq --compact-output 'map(join("|"))' } >> "$GITHUB_OUTPUT" - + envoy-integration-test: runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: - setup - get-go-version - - get-envoy-versions - generate-envoy-job-matrices - dev-build permissions: @@ -114,7 +109,7 @@ jobs: strategy: fail-fast: false matrix: - envoy-version: ${{ fromJSON(needs.get-envoy-versions.outputs.envoy-versions-json) }} + envoy-version: ["1.23.12", "1.24.12", "1.25.11", "1.26.8"] xds-target: ["server", "client"] test-cases: ${{ fromJSON(needs.generate-envoy-job-matrices.outputs.envoy-matrix) }} env: @@ -145,7 +140,6 @@ jobs: run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile ./bin - name: Envoy Integration Tests - id: envoy-integration-tests env: GOTESTSUM_JUNITFILE: ${{ 
env.TEST_RESULTS_DIR }}/results.xml GOTESTSUM_FORMAT: standard-verbose @@ -166,23 +160,6 @@ jobs: --packages=./test/integration/connect/envoy \ -- -timeout=30m -tags integration -run="TestEnvoy/(${{ matrix.test-cases }})" - # See https://github.com/orgs/community/discussions/8945#discussioncomment-9897011 - # and overall topic discussion for why this is necessary. - - name: Generate artifact ID - id: generate-artifact-id - if: ${{ failure() && steps.envoy-integration-tests.conclusion == 'failure' }} - run: | - ARTIFACT_ID=$(uuidgen) - echo "Artifact ID: $ARTIFACT_ID (search this in job summary for download link)" - echo "artifact_id=$ARTIFACT_ID" >> "$GITHUB_ENV" - - - name: Upload failure logs - if: ${{ failure() && steps.envoy-integration-tests.conclusion == 'failure' }} - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 - with: - name: envoy-${{ matrix.envoy-version }}-logs-${{ env.artifact_id }} - path: test/integration/connect/envoy/workdir/logs/ - # NOTE: ENT specific step as we store secrets in Vault. - name: Authenticate to Vault if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} @@ -220,7 +197,6 @@ jobs: needs: - setup - get-go-version - - get-envoy-versions - dev-build permissions: id-token: write # NOTE: this permission is explicitly required for Vault auth. @@ -228,20 +204,10 @@ jobs: strategy: fail-fast: false matrix: - consul-version: ["1.15", "1.17", "1.18", "1.19"] + consul-version: ["1.14", "1.15", "1.16"] env: CONSUL_LATEST_VERSION: ${{ matrix.consul-version }} - # ENVOY_VERSION should be the latest version supported by _all_ Consul versions in the - # matrix.consul-version, since we are testing upgrade from an older Consul version. - # In practice, this should be the highest Envoy version supported by the lowest non-LTS - # Consul version in the matrix (LTS versions receive additional Envoy version support). 
- # - # This value should be kept current in new nightly test workflows, and updated any time - # a new major Envoy release is added to the set supported by Consul versions in - # matrix.consul-version (i.e. whenever the highest common Envoy version across active - # Consul versions changes). The minor Envoy version does not necessarily need to be - # kept current for the purpose of these tests, but the major (1.N) version should be. - ENVOY_VERSION: 1.27.6 + ENVOY_VERSION: "1.24.6" steps: - name: Checkout code uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 @@ -352,96 +318,6 @@ jobs: DD_ENV: ci run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml - upgrade-integration-test-deployer: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-large ) }} - needs: - - setup - - get-go-version - - dev-build - permissions: - id-token: write # NOTE: this permission is explicitly required for Vault auth. - contents: read - strategy: - fail-fast: false - matrix: - consul-version: ["1.15", "1.17", "1.18"] - env: - CONSUL_LATEST_VERSION: ${{ matrix.consul-version }} - steps: - - name: Checkout code - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - with: - ref: ${{ env.BRANCH }} - # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
- - name: Setup Git - if: ${{ endsWith(github.repository, '-enterprise') }} - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version: ${{ needs.get-go-version.outputs.go-version }} - - run: go env - - name: Build image - run: make test-deployer-setup - - name: Upgrade Integration Tests - run: | - mkdir -p "${{ env.TEST_RESULTS_DIR }}" - export NOLOGBUFFER=1 - cd ./test-integ/upgrade - docker run --rm ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local consul version - go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ - --raw-command \ - --format=standard-verbose \ - --debug \ - --packages="./..." \ - -- \ - go test \ - -tags "${{ env.GOTAGS }}" \ - -timeout=60m \ - -parallel=2 \ - -json \ - ./... \ - --target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \ - --target-version local \ - --latest-image docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }} \ - --latest-version "${{ env.CONSUL_LATEST_VERSION }}" - env: - # this is needed because of incompatibility between RYUK container and GHA - GOTESTSUM_JUNITFILE: ${{ env.TEST_RESULTS_DIR }}/results.xml - GOTESTSUM_FORMAT: standard-verbose - COMPOSE_INTERACTIVE_NO_CLI: 1 - # tput complains if this isn't set to something. - TERM: ansi - # NOTE: ENT specific step as we store secrets in Vault. - - name: Authenticate to Vault - if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} - id: vault-auth - run: vault-auth - - # NOTE: ENT specific step as we store secrets in Vault. 
- - name: Fetch Secrets - if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} - id: secrets - uses: hashicorp/vault-action@v3 - with: - url: ${{ steps.vault-auth.outputs.addr }} - caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} - token: ${{ steps.vault-auth.outputs.token }} - secrets: | - kv/data/github/${{ github.repository }}/datadog apikey | DATADOG_API_KEY; - - - name: prepare datadog-ci - if: ${{ !cancelled() && !endsWith(github.repository, '-enterprise') }} - run: | - curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci" - chmod +x /usr/local/bin/datadog-ci - - - name: upload coverage - # do not run on forks - if: ${{ !cancelled() && github.event.pull_request.head.repo.full_name == github.repository }} - env: - DATADOG_API_KEY: "${{ endsWith(github.repository, '-enterprise') && env.DATADOG_API_KEY || secrets.DATADOG_API_KEY }}" - DD_ENV: ci - run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml test-integrations-success: needs: @@ -450,7 +326,6 @@ jobs: - generate-envoy-job-matrices - envoy-integration-test - upgrade-integration-test - - upgrade-integration-test-deployer runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} if: ${{ always() }} steps: diff --git a/.github/workflows/nightly-test-integrations-1.17.x.yml b/.github/workflows/nightly-test-integrations-1.17.x.yml index 471cdb163f043..f0b810150eaf0 100644 --- a/.github/workflows/nightly-test-integrations-1.17.x.yml +++ b/.github/workflows/nightly-test-integrations-1.17.x.yml @@ -6,7 +6,7 @@ name: Nightly test-integrations 1.17.x on: schedule: # Run nightly at 1AM UTC/9PM EST/6PM PST - - cron: '0 1 * * *' + - cron: '* 1 * * *' workflow_dispatch: {} env: @@ -23,15 +23,8 @@ env: BRANCH_NAME: "release-1.17.x" # Used for naming artifacts jobs: - check-ent: - runs-on: ubuntu-latest - if: ${{ endsWith(github.repository, '-enterprise') }} - steps: - - 
run: echo "Building Enterprise" - setup: runs-on: ubuntu-latest - needs: [check-ent] name: Setup outputs: compute-small: ${{ steps.runners.outputs.compute-small }} @@ -48,16 +41,7 @@ jobs: run: .github/scripts/get_runner_classes.sh get-go-version: - needs: [check-ent] uses: ./.github/workflows/reusable-get-go-version.yml - with: - ref: release/1.17.x - - get-envoy-versions: - needs: [check-ent] - uses: ./.github/workflows/reusable-get-envoy-versions.yml - with: - ref: release/1.17.x dev-build: needs: @@ -87,34 +71,36 @@ jobs: - name: Generate Envoy Job Matrix id: set-matrix env: - # TEST_SPLITS sets the number of test case splits to use in the matrix. This will be - # further multiplied in envoy-integration tests by the other dimensions in the matrix - # to determine the total number of runners used. - TEST_SPLITS: 4 + # this is further going to multiplied in envoy-integration tests by the + # other dimensions in the matrix. Currently TOTAL_RUNNERS would be + # multiplied by 8 based on these values: + # envoy-version: ["1.24.12", "1.25.11", "1.26.8", "1.27.5"] + # xds-target: ["server", "client"] + TOTAL_RUNNERS: 4 JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]' run: | + NUM_RUNNERS=$TOTAL_RUNNERS NUM_DIRS=$(find ./test/integration/connect/envoy -mindepth 1 -maxdepth 1 -type d | wc -l) - if [ "$NUM_DIRS" -lt "$TEST_SPLITS" ]; then - echo "TEST_SPLITS is larger than the number of tests/packages to split." - TEST_SPLITS=$((NUM_DIRS-1)) + if [ "$NUM_DIRS" -lt "$NUM_RUNNERS" ]; then + echo "TOTAL_RUNNERS is larger than the number of tests/packages to split." + NUM_RUNNERS=$((NUM_DIRS-1)) fi - # fix issue where test splitting calculation generates 1 more split than TEST_SPLITS. - TEST_SPLITS=$((TEST_SPLITS-1)) + # fix issue where test splitting calculation generates 1 more split than TOTAL_RUNNERS. 
+ NUM_RUNNERS=$((NUM_RUNNERS-1)) { echo -n "envoy-matrix=" find ./test/integration/connect/envoy -maxdepth 1 -type d -print0 \ | xargs -0 -n 1 basename \ - | jq --raw-input --argjson runnercount "$TEST_SPLITS" "$JQ_SLICER" \ + | jq --raw-input --argjson runnercount "$NUM_RUNNERS" "$JQ_SLICER" \ | jq --compact-output 'map(join("|"))' } >> "$GITHUB_OUTPUT" - + envoy-integration-test: runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: - setup - get-go-version - - get-envoy-versions - generate-envoy-job-matrices - dev-build permissions: @@ -123,7 +109,7 @@ jobs: strategy: fail-fast: false matrix: - envoy-version: ${{ fromJSON(needs.get-envoy-versions.outputs.envoy-versions-json) }} + envoy-version: ["1.24.12", "1.25.11", "1.26.8", "1.27.6"] xds-target: ["server", "client"] test-cases: ${{ fromJSON(needs.generate-envoy-job-matrices.outputs.envoy-matrix) }} env: @@ -154,7 +140,6 @@ jobs: run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile ./bin - name: Envoy Integration Tests - id: envoy-integration-tests env: GOTESTSUM_JUNITFILE: ${{ env.TEST_RESULTS_DIR }}/results.xml GOTESTSUM_FORMAT: standard-verbose @@ -175,23 +160,6 @@ jobs: --packages=./test/integration/connect/envoy \ -- -timeout=30m -tags integration -run="TestEnvoy/(${{ matrix.test-cases }})" - # See https://github.com/orgs/community/discussions/8945#discussioncomment-9897011 - # and overall topic discussion for why this is necessary. 
- - name: Generate artifact ID - id: generate-artifact-id - if: ${{ failure() && steps.envoy-integration-tests.conclusion == 'failure' }} - run: | - ARTIFACT_ID=$(uuidgen) - echo "Artifact ID: $ARTIFACT_ID (search this in job summary for download link)" - echo "artifact_id=$ARTIFACT_ID" >> "$GITHUB_ENV" - - - name: Upload failure logs - if: ${{ failure() && steps.envoy-integration-tests.conclusion == 'failure' }} - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 - with: - name: envoy-${{ matrix.envoy-version }}-logs-${{ env.artifact_id }} - path: test/integration/connect/envoy/workdir/logs/ - # NOTE: ENT specific step as we store secrets in Vault. - name: Authenticate to Vault if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} @@ -223,7 +191,7 @@ jobs: DATADOG_API_KEY: "${{ endsWith(github.repository, '-enterprise') && env.DATADOG_API_KEY || secrets.DATADOG_API_KEY }}" DD_ENV: ci run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml - + upgrade-integration-test: runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: @@ -239,17 +207,7 @@ jobs: consul-version: ["1.15", "1.16", "1.17"] env: CONSUL_LATEST_VERSION: ${{ matrix.consul-version }} - # ENVOY_VERSION should be the latest version supported by _all_ Consul versions in the - # matrix.consul-version, since we are testing upgrade from an older Consul version. - # In practice, this should be the highest Envoy version supported by the lowest non-LTS - # Consul version in the matrix (LTS versions receive additional Envoy version support). - # - # This value should be kept current in new nightly test workflows, and updated any time - # a new major Envoy release is added to the set supported by Consul versions in - # matrix.consul-version (i.e. whenever the highest common Envoy version across active - # Consul versions changes). 
The minor Envoy version does not necessarily need to be - # kept current for the purpose of these tests, but the major (1.N) version should be. - ENVOY_VERSION: 1.27.6 + ENVOY_VERSION: "1.24.6" steps: - name: Checkout code uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 @@ -460,7 +418,7 @@ jobs: - upgrade-integration-test - upgrade-integration-test-deployer runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} - if: ${{ always() && endsWith(github.repository, '-enterprise') }} + if: ${{ always() }} steps: - name: evaluate upstream job results run: | diff --git a/.github/workflows/nightly-test-integrations-1.18.x.yml b/.github/workflows/nightly-test-integrations-1.18.x.yml deleted file mode 100644 index 2d358cda69ed6..0000000000000 --- a/.github/workflows/nightly-test-integrations-1.18.x.yml +++ /dev/null @@ -1,482 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -name: Nightly test-integrations 1.18.x - -on: - schedule: - # Run nightly at 1AM UTC/9PM EST/6PM PST - - cron: '0 1 * * *' - workflow_dispatch: {} - -env: - TEST_RESULTS_DIR: /tmp/test-results - TEST_RESULTS_ARTIFACT_NAME: test-results - CONSUL_LICENSE: ${{ secrets.CONSUL_LICENSE }} - GOTAGS: ${{ endsWith(github.repository, '-enterprise') && 'consulent' || '' }} - GOTESTSUM_VERSION: "1.11.0" - CONSUL_BINARY_UPLOAD_NAME: consul-bin - # strip the hashicorp/ off the front of github.repository for consul - CONSUL_LATEST_IMAGE_NAME: ${{ endsWith(github.repository, '-enterprise') && github.repository || 'hashicorp/consul' }} - GOPRIVATE: github.com/hashicorp # Required for enterprise deps - BRANCH: "release/1.18.x" - BRANCH_NAME: "release-1.18.x" # Used for naming artifacts - -jobs: - check-ent: - runs-on: ubuntu-latest - if: ${{ endsWith(github.repository, '-enterprise') }} - steps: - - run: echo "Building Enterprise" - - setup: - runs-on: ubuntu-latest - needs: [check-ent] - name: Setup - outputs: - compute-small: ${{ 
steps.runners.outputs.compute-small }} - compute-medium: ${{ steps.runners.outputs.compute-medium }} - compute-large: ${{ steps.runners.outputs.compute-large }} - compute-xl: ${{ steps.runners.outputs.compute-xl }} - enterprise: ${{ steps.runners.outputs.enterprise }} - steps: - - name: Checkout code - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - with: - ref: ${{ env.BRANCH }} - - id: runners - run: .github/scripts/get_runner_classes.sh - - get-go-version: - needs: [check-ent] - uses: ./.github/workflows/reusable-get-go-version.yml - with: - ref: release/1.18.x - - get-envoy-versions: - needs: [check-ent] - uses: ./.github/workflows/reusable-get-envoy-versions.yml - with: - ref: release/1.18.x - - dev-build: - needs: - - setup - - get-go-version - uses: ./.github/workflows/reusable-dev-build.yml - with: - runs-on: ${{ needs.setup.outputs.compute-large }} - repository-name: ${{ github.repository }} - uploaded-binary-name: 'consul-bin' - branch-name: "release/1.18.x" - go-version: ${{ needs.get-go-version.outputs.go-version }} - secrets: - elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - - generate-envoy-job-matrices: - needs: [setup] - runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} - name: Generate Envoy Job Matrices - outputs: - envoy-matrix: ${{ steps.set-matrix.outputs.envoy-matrix }} - steps: - - name: Checkout code - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - with: - ref: ${{ env.BRANCH }} - - name: Generate Envoy Job Matrix - id: set-matrix - env: - # TEST_SPLITS sets the number of test case splits to use in the matrix. This will be - # further multiplied in envoy-integration tests by the other dimensions in the matrix - # to determine the total number of runners used. 
- TEST_SPLITS: 4 - JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]' - run: | - NUM_DIRS=$(find ./test/integration/connect/envoy -mindepth 1 -maxdepth 1 -type d | wc -l) - - if [ "$NUM_DIRS" -lt "$TEST_SPLITS" ]; then - echo "TEST_SPLITS is larger than the number of tests/packages to split." - TEST_SPLITS=$((NUM_DIRS-1)) - fi - # fix issue where test splitting calculation generates 1 more split than TEST_SPLITS. - TEST_SPLITS=$((TEST_SPLITS-1)) - { - echo -n "envoy-matrix=" - find ./test/integration/connect/envoy -maxdepth 1 -type d -print0 \ - | xargs -0 -n 1 basename \ - | jq --raw-input --argjson runnercount "$TEST_SPLITS" "$JQ_SLICER" \ - | jq --compact-output 'map(join("|"))' - } >> "$GITHUB_OUTPUT" - - envoy-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} - needs: - - setup - - get-go-version - - get-envoy-versions - - generate-envoy-job-matrices - - dev-build - permissions: - id-token: write # NOTE: this permission is explicitly required for Vault auth. 
- contents: read - strategy: - fail-fast: false - matrix: - envoy-version: ${{ fromJSON(needs.get-envoy-versions.outputs.envoy-versions-json) }} - xds-target: ["server", "client"] - test-cases: ${{ fromJSON(needs.generate-envoy-job-matrices.outputs.envoy-matrix) }} - env: - ENVOY_VERSION: ${{ matrix.envoy-version }} - XDS_TARGET: ${{ matrix.xds-target }} - AWS_LAMBDA_REGION: us-west-2 - steps: - - name: Checkout code - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - with: - ref: ${{ env.BRANCH }} - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version: ${{ needs.get-go-version.outputs.go-version }} - - - name: fetch binary - uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 - with: - name: '${{ env.CONSUL_BINARY_UPLOAD_NAME }}' - path: ./bin - - name: restore mode+x - run: chmod +x ./bin/consul - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0 - - - name: Docker build - run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile ./bin - - - name: Envoy Integration Tests - id: envoy-integration-tests - env: - GOTESTSUM_JUNITFILE: ${{ env.TEST_RESULTS_DIR }}/results.xml - GOTESTSUM_FORMAT: standard-verbose - COMPOSE_INTERACTIVE_NO_CLI: 1 - LAMBDA_TESTS_ENABLED: "true" - # tput complains if this isn't set to something. 
- TERM: ansi - run: | - # shellcheck disable=SC2001 - echo "Running $(sed 's,|, ,g' <<< "${{ matrix.test-cases }}" |wc -w) subtests" - # shellcheck disable=SC2001 - sed 's,|,\n,g' <<< "${{ matrix.test-cases }}" - go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ - --debug \ - --rerun-fails \ - --rerun-fails-report=/tmp/gotestsum-rerun-fails \ - --jsonfile /tmp/jsonfile/go-test.log \ - --packages=./test/integration/connect/envoy \ - -- -timeout=30m -tags integration -run="TestEnvoy/(${{ matrix.test-cases }})" - - # See https://github.com/orgs/community/discussions/8945#discussioncomment-9897011 - # and overall topic discussion for why this is necessary. - - name: Generate artifact ID - id: generate-artifact-id - if: ${{ failure() && steps.envoy-integration-tests.conclusion == 'failure' }} - run: | - ARTIFACT_ID=$(uuidgen) - echo "Artifact ID: $ARTIFACT_ID (search this in job summary for download link)" - echo "artifact_id=$ARTIFACT_ID" >> "$GITHUB_ENV" - - - name: Upload failure logs - if: ${{ failure() && steps.envoy-integration-tests.conclusion == 'failure' }} - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 - with: - name: envoy-${{ matrix.envoy-version }}-logs-${{ env.artifact_id }} - path: test/integration/connect/envoy/workdir/logs/ - - # NOTE: ENT specific step as we store secrets in Vault. - - name: Authenticate to Vault - if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} - id: vault-auth - run: vault-auth - - # NOTE: ENT specific step as we store secrets in Vault. 
- - name: Fetch Secrets - if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} - id: secrets - uses: hashicorp/vault-action@v3 - with: - url: ${{ steps.vault-auth.outputs.addr }} - caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} - token: ${{ steps.vault-auth.outputs.token }} - secrets: | - kv/data/github/${{ github.repository }}/datadog apikey | DATADOG_API_KEY; - - - name: prepare datadog-ci - if: ${{ !cancelled() && !endsWith(github.repository, '-enterprise') }} - run: | - curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci" - chmod +x /usr/local/bin/datadog-ci - - - name: upload coverage - # do not run on forks - if: ${{ !cancelled() && github.event.pull_request.head.repo.full_name == github.repository }} - env: - DATADOG_API_KEY: "${{ endsWith(github.repository, '-enterprise') && env.DATADOG_API_KEY || secrets.DATADOG_API_KEY }}" - DD_ENV: ci - run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml - - upgrade-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} - needs: - - setup - - get-go-version - - dev-build - permissions: - id-token: write # NOTE: this permission is explicitly required for Vault auth. - contents: read - strategy: - fail-fast: false - matrix: - consul-version: ["1.15", "1.16", "1.17", "1.18"] - env: - CONSUL_LATEST_VERSION: ${{ matrix.consul-version }} - # ENVOY_VERSION should be the latest version supported by _all_ Consul versions in the - # matrix.consul-version, since we are testing upgrade from an older Consul version. - # In practice, this should be the highest Envoy version supported by the lowest non-LTS - # Consul version in the matrix (LTS versions receive additional Envoy version support). 
- # - # This value should be kept current in new nightly test workflows, and updated any time - # a new major Envoy release is added to the set supported by Consul versions in - # matrix.consul-version (i.e. whenever the highest common Envoy version across active - # Consul versions changes). The minor Envoy version does not necessarily need to be - # kept current for the purpose of these tests, but the major (1.N) version should be. - ENVOY_VERSION: 1.27.6 - steps: - - name: Checkout code - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - with: - ref: ${{ env.BRANCH }} - # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - - name: Setup Git - if: ${{ endsWith(github.repository, '-enterprise') }} - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version: ${{ needs.get-go-version.outputs.go-version }} - - run: go env - - # Get go binary from workspace - - name: fetch binary - uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 - with: - name: '${{ env.CONSUL_BINARY_UPLOAD_NAME }}' - path: . - - name: restore mode+x - run: chmod +x consul - - name: Build consul:local image - run: docker build -t ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local -f ./build-support/docker/Consul-Dev.dockerfile . 
- - name: Build consul-envoy:latest-version image - id: buildConsulEnvoyLatestImage - run: | - if ${{ endsWith(github.repository, '-enterprise') }} == 'true' - then - docker build -t consul-envoy:latest-version --build-arg CONSUL_IMAGE=docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }}:${{ env.CONSUL_LATEST_VERSION }}-ent --build-arg ENVOY_VERSION=${{ env.ENVOY_VERSION }} -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets - else - docker build -t consul-envoy:latest-version --build-arg CONSUL_IMAGE=docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }}:${{ env.CONSUL_LATEST_VERSION }} --build-arg ENVOY_VERSION=${{ env.ENVOY_VERSION }} -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets - fi - - name: Build consul-envoy:target-version image - id: buildConsulEnvoyTargetImage - continue-on-error: true - run: docker build -t consul-envoy:target-version --build-arg CONSUL_IMAGE=${{ env.CONSUL_LATEST_IMAGE_NAME }}:local --build-arg ENVOY_VERSION=${{ env.ENVOY_VERSION }} -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets - - name: Retry Build consul-envoy:target-version image - if: steps.buildConsulEnvoyTargetImage.outcome == 'failure' - run: docker build -t consul-envoy:target-version --build-arg CONSUL_IMAGE=${{ env.CONSUL_LATEST_IMAGE_NAME }}:local --build-arg ENVOY_VERSION=${{ env.ENVOY_VERSION }} -f ./test/integration/consul-container/assets/Dockerfile-consul-envoy ./test/integration/consul-container/assets - - name: Build sds image - run: docker build -t consul-sds-server ./test/integration/connect/envoy/test-sds-server/ - - name: Configure GH workaround for ipv6 loopback - if: ${{ !endsWith(github.repository, '-enterprise') }} - run: | - cat /etc/hosts && echo "-----------" - sudo sed -i 's/::1 *localhost ip6-localhost ip6-loopback/::1 ip6-localhost 
ip6-loopback/g' /etc/hosts - cat /etc/hosts - - name: Upgrade Integration Tests - run: | - mkdir -p "${{ env.TEST_RESULTS_DIR }}" - cd ./test/integration/consul-container/test/upgrade - docker run --rm ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local consul version - go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ - --raw-command \ - --format=github-actions \ - --rerun-fails \ - --packages="./..." \ - -- \ - go test \ - -p=4 \ - -tags "${{ env.GOTAGS }}" \ - -timeout=30m \ - -json \ - ./... \ - --follow-log=false \ - --target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \ - --target-version local \ - --latest-image docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }} \ - --latest-version "${{ env.CONSUL_LATEST_VERSION }}" - ls -lrt - env: - # this is needed because of incompatibility between RYUK container and GHA - GOTESTSUM_JUNITFILE: ${{ env.TEST_RESULTS_DIR }}/results.xml - GOTESTSUM_FORMAT: standard-verbose - COMPOSE_INTERACTIVE_NO_CLI: 1 - # tput complains if this isn't set to something. - TERM: ansi - # NOTE: ENT specific step as we store secrets in Vault. - - name: Authenticate to Vault - if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} - id: vault-auth - run: vault-auth - - # NOTE: ENT specific step as we store secrets in Vault. 
- - name: Fetch Secrets - if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} - id: secrets - uses: hashicorp/vault-action@v3 - with: - url: ${{ steps.vault-auth.outputs.addr }} - caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} - token: ${{ steps.vault-auth.outputs.token }} - secrets: | - kv/data/github/${{ github.repository }}/datadog apikey | DATADOG_API_KEY; - - - name: prepare datadog-ci - if: ${{ !cancelled() && !endsWith(github.repository, '-enterprise') }} - run: | - curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci" - chmod +x /usr/local/bin/datadog-ci - - - name: upload coverage - # do not run on forks - if: ${{ !cancelled() && github.event.pull_request.head.repo.full_name == github.repository }} - env: - DATADOG_API_KEY: "${{ endsWith(github.repository, '-enterprise') && env.DATADOG_API_KEY || secrets.DATADOG_API_KEY }}" - DD_ENV: ci - run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml - - upgrade-integration-test-deployer: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-large ) }} - needs: - - setup - - get-go-version - - dev-build - permissions: - id-token: write # NOTE: this permission is explicitly required for Vault auth. - contents: read - strategy: - fail-fast: false - matrix: - consul-version: [ "1.15", "1.16", "1.17"] - env: - CONSUL_LATEST_VERSION: ${{ matrix.consul-version }} - steps: - - name: Checkout code - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - with: - ref: ${{ env.BRANCH }} - # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. 
- - name: Setup Git - if: ${{ endsWith(github.repository, '-enterprise') }} - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version: ${{ needs.get-go-version.outputs.go-version }} - - run: go env - - name: Build image - run: make test-deployer-setup - - name: Upgrade Integration Tests - run: | - mkdir -p "${{ env.TEST_RESULTS_DIR }}" - export NOLOGBUFFER=1 - cd ./test-integ/upgrade - docker run --rm ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local consul version - go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ - --raw-command \ - --format=standard-verbose \ - --debug \ - --packages="./..." \ - -- \ - go test \ - -tags "${{ env.GOTAGS }}" \ - -timeout=60m \ - -parallel=2 \ - -json \ - ./... \ - --target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \ - --target-version local \ - --latest-image docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }} \ - --latest-version "${{ env.CONSUL_LATEST_VERSION }}" - env: - # this is needed because of incompatibility between RYUK container and GHA - GOTESTSUM_JUNITFILE: ${{ env.TEST_RESULTS_DIR }}/results.xml - GOTESTSUM_FORMAT: standard-verbose - COMPOSE_INTERACTIVE_NO_CLI: 1 - # tput complains if this isn't set to something. - TERM: ansi - # NOTE: ENT specific step as we store secrets in Vault. - - name: Authenticate to Vault - if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} - id: vault-auth - run: vault-auth - - # NOTE: ENT specific step as we store secrets in Vault. 
- - name: Fetch Secrets - if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} - id: secrets - uses: hashicorp/vault-action@v3 - with: - url: ${{ steps.vault-auth.outputs.addr }} - caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} - token: ${{ steps.vault-auth.outputs.token }} - secrets: | - kv/data/github/${{ github.repository }}/datadog apikey | DATADOG_API_KEY; - - - name: prepare datadog-ci - if: ${{ !cancelled() && !endsWith(github.repository, '-enterprise') }} - run: | - curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci" - chmod +x /usr/local/bin/datadog-ci - - - name: upload coverage - # do not run on forks - if: ${{ !cancelled() && github.event.pull_request.head.repo.full_name == github.repository }} - env: - DATADOG_API_KEY: "${{ endsWith(github.repository, '-enterprise') && env.DATADOG_API_KEY || secrets.DATADOG_API_KEY }}" - DD_ENV: ci - run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml - - test-integrations-success: - needs: - - setup - - dev-build - - generate-envoy-job-matrices - - envoy-integration-test - - upgrade-integration-test - - upgrade-integration-test-deployer - runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} - if: ${{ always() && endsWith(github.repository, '-enterprise') }} - steps: - - name: evaluate upstream job results - run: | - # exit 1 if failure or cancelled result for any upstream job - if printf '${{ toJSON(needs) }}' | grep -E -i '\"result\": \"(failure|cancelled)\"'; then - printf "Tests failed or workflow cancelled:\n\n${{ toJSON(needs) }}" - exit 1 - fi - - name: Notify Slack - if: ${{ failure() }} - id: slack - uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0 - with: - payload: | - { - "message": "One or more nightly integration tests have failed on branch ${{ env.BRANCH }} for Consul. 
${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - } - env: - SLACK_WEBHOOK_URL: ${{ secrets.CONSUL_NIGHTLY_INTEG_TEST_SLACK_WEBHOOK }} diff --git a/.github/workflows/nightly-test-integrations.yml b/.github/workflows/nightly-test-integrations.yml index cfaa25303062a..bf914f88935d4 100644 --- a/.github/workflows/nightly-test-integrations.yml +++ b/.github/workflows/nightly-test-integrations.yml @@ -6,7 +6,7 @@ name: Nightly test-integrations on: schedule: # Run nightly at 12AM UTC/8PM EST/5PM PST - - cron: '0 0 * * *' + - cron: '* 0 * * *' workflow_dispatch: {} env: @@ -41,9 +41,6 @@ jobs: get-go-version: uses: ./.github/workflows/reusable-get-go-version.yml - get-envoy-versions: - uses: ./.github/workflows/reusable-get-envoy-versions.yml - dev-build: needs: - setup @@ -58,9 +55,7 @@ jobs: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} generate-envoy-job-matrices: - needs: - - setup - - get-envoy-versions + needs: [setup] runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} name: Generate Envoy Job Matrices outputs: @@ -73,25 +68,28 @@ jobs: - name: Generate Envoy Job Matrix id: set-matrix env: - # TEST_SPLITS sets the number of test case splits to use in the matrix. This will be - # further multiplied in envoy-integration tests by the other dimensions in the matrix - # to determine the total number of runners used. - TEST_SPLITS: 4 + # this is further going to multiplied in envoy-integration tests by the + # other dimensions in the matrix. 
Currently TOTAL_RUNNERS would be + # multiplied by 8 based on these values: + # envoy-version: ["1.26.8", "1.27.5", "1.28.3", "1.29.4"] + # xds-target: ["server", "client"] + TOTAL_RUNNERS: 8 JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]' run: | + NUM_RUNNERS=$TOTAL_RUNNERS NUM_DIRS=$(find ./test/integration/connect/envoy -mindepth 1 -maxdepth 1 -type d | wc -l) - if [ "$NUM_DIRS" -lt "$TEST_SPLITS" ]; then - echo "TEST_SPLITS is larger than the number of tests/packages to split." - TEST_SPLITS=$((NUM_DIRS-1)) + if [ "$NUM_DIRS" -lt "$NUM_RUNNERS" ]; then + echo "TOTAL_RUNNERS is larger than the number of tests/packages to split." + NUM_RUNNERS=$((NUM_DIRS-1)) fi - # fix issue where test splitting calculation generates 1 more split than TEST_SPLITS. - TEST_SPLITS=$((TEST_SPLITS-1)) + # fix issue where test splitting calculation generates 1 more split than TOTAL_RUNNERS. + NUM_RUNNERS=$((NUM_RUNNERS-1)) { echo -n "envoy-matrix=" find ./test/integration/connect/envoy -maxdepth 1 -type d -print0 \ | xargs -0 -n 1 basename \ - | jq --raw-input --argjson runnercount "$TEST_SPLITS" "$JQ_SLICER" \ + | jq --raw-input --argjson runnercount "$NUM_RUNNERS" "$JQ_SLICER" \ | jq --compact-output 'map(join("|"))' } >> "$GITHUB_OUTPUT" @@ -100,7 +98,6 @@ jobs: needs: - setup - get-go-version - - get-envoy-versions - generate-envoy-job-matrices - dev-build permissions: @@ -109,7 +106,7 @@ jobs: strategy: fail-fast: false matrix: - envoy-version: ${{ fromJSON(needs.get-envoy-versions.outputs.envoy-versions-json) }} + envoy-version: ["1.26.8", "1.27.6", "1.28.4", "1.29.5"] xds-target: ["server", "client"] test-cases: ${{ fromJSON(needs.generate-envoy-job-matrices.outputs.envoy-matrix) }} env: @@ -191,7 +188,7 @@ jobs: DATADOG_API_KEY: "${{ endsWith(github.repository, '-enterprise') && env.DATADOG_API_KEY || secrets.DATADOG_API_KEY }}" DD_ENV: ci run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml - + upgrade-integration-test: 
runs-on: ${{ fromJSON(needs.setup.outputs.compute-large ) }} needs: @@ -204,20 +201,13 @@ jobs: strategy: fail-fast: false matrix: - consul-version: ["1.17", "1.18", "1.19"] + consul-version: [ "1.17", "1.18"] env: CONSUL_LATEST_VERSION: ${{ matrix.consul-version }} - # ENVOY_VERSION should be the latest version supported by _all_ Consul versions in the - # matrix.consul-version, since we are testing upgrade from an older Consul version. - # In practice, this should be the highest Envoy version supported by the lowest non-LTS - # Consul version in the matrix (LTS versions receive additional Envoy version support). - # - # This value should be kept current in new nightly test workflows, and updated any time - # a new major Envoy release is added to the set supported by Consul versions in - # matrix.consul-version (i.e. whenever the highest common Envoy version across active - # Consul versions changes). The minor Envoy version does not necessarily need to be - # kept current for the purpose of these tests, but the major (1.N) version should be. - ENVOY_VERSION: 1.27.6 + # ENVOY_VERSION should be the latest version upported by all + # consul versions in the matrix.consul-version, since we are testing upgrade from + # an older consul version, e.g., 1.27.5 is supported by both 1.16 and 1.17. 
+ ENVOY_VERSION: "1.27.5" steps: - name: Checkout code uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 @@ -339,7 +329,7 @@ jobs: strategy: fail-fast: false matrix: - consul-version: [ "1.17", "1.18"] + consul-version: [ "1.16", "1.17"] env: CONSUL_LATEST_VERSION: ${{ matrix.consul-version }} steps: diff --git a/.github/workflows/test-integrations.yml b/.github/workflows/test-integrations.yml index b62a4648293fc..e440d953b2770 100644 --- a/.github/workflows/test-integrations.yml +++ b/.github/workflows/test-integrations.yml @@ -340,7 +340,6 @@ jobs: run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile ./bin - name: Envoy Integration Tests - id: envoy-integration-tests env: GOTESTSUM_JUNITFILE: ${{ env.TEST_RESULTS_DIR }}/results.xml GOTESTSUM_FORMAT: standard-verbose @@ -361,23 +360,6 @@ jobs: --packages=./test/integration/connect/envoy \ -- -timeout=30m -tags integration -run="TestEnvoy/(${{ matrix.test-cases }})" - # See https://github.com/orgs/community/discussions/8945#discussioncomment-9897011 - # and overall topic discussion for why this is necessary. - - name: Generate artifact ID - id: generate-artifact-id - if: ${{ failure() && steps.envoy-integration-tests.conclusion == 'failure' }} - run: | - ARTIFACT_ID=$(uuidgen) - echo "Artifact ID: $ARTIFACT_ID (search this in job summary for download link)" - echo "artifact_id=$ARTIFACT_ID" >> "$GITHUB_ENV" - - - name: Upload failure logs - if: ${{ failure() && steps.envoy-integration-tests.conclusion == 'failure' }} - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 - with: - name: envoy-${{ matrix.envoy-version }}-logs-${{ env.artifact_id }} - path: test/integration/connect/envoy/workdir/logs/ - # NOTE: ENT specific step as we store secrets in Vault. 
- name: Authenticate to Vault if: ${{ !cancelled() && endsWith(github.repository, '-enterprise') }} @@ -422,8 +404,7 @@ jobs: contents: read env: ENVOY_VERSION: ${{ needs.get-envoy-versions.outputs.max-envoy-version }} - #TODO don't harcode this image name - CONSUL_DATAPLANE_IMAGE: "docker.io/hashicorppreview/consul-dataplane:1.6-dev-ubi" + CONSUL_DATAPLANE_IMAGE: "docker.io/hashicorppreview/consul-dataplane:1.5-dev-ubi" steps: - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. @@ -536,8 +517,7 @@ jobs: strategy: fail-fast: false env: - # TODO @sarah.alsmiller Don't hardcode this version value - DEPLOYER_CONSUL_DATAPLANE_IMAGE: "docker.mirror.hashicorp.services/hashicorppreview/consul-dataplane:1.6-dev" + DEPLOYER_CONSUL_DATAPLANE_IMAGE: "docker.mirror.hashicorp.services/hashicorppreview/consul-dataplane:1.5-dev" steps: - name: Checkout code uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 diff --git a/.release/versions.hcl b/.release/versions.hcl index fdc0f2989d27b..abdd7f3342d42 100644 --- a/.release/versions.hcl +++ b/.release/versions.hcl @@ -6,13 +6,12 @@ schema = 1 active_versions { - version "1.19" { - ce_active = true - } version "1.18" { + ce_active = true lts = true } version "1.17" {} + version "1.16" {} version "1.15" { lts = true } diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d3f07d4361c2..5835f9c6440d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,38 +12,6 @@ BUG FIXES: * api-gateway: **(Enterprise only)** ensure clusters are properly created for JWT providers with a remote URI for the JWKS endpoint [[GH-21604](https://github.com/hashicorp/consul/issues/21604)] -## 1.18.4 Enterprise (August 26, 2024) - -Enterprise LTS: Consul Enterprise 1.18 is a Long-Term Support (LTS) release. 
- -SECURITY: -* ui: Upgrade modules with d3-color as a dependency to address denial of service issue in d3-color < 3.1.0 - -IMPROVEMENTS: - -* Use Envoy's default for a route's validate_clusters option, which is false. This fixes a case where non-existent clusters could cause a route to no longer route to any of its backends, including existing ones. [[GH-21587](https://github.com/hashicorp/consul/issues/21587)] - -## 1.17.7 Enterprise (August 26, 2024) - -SECURITY: -* ui: Upgrade modules with d3-color as a dependency to address denial of service issue in d3-color < 3.1.0 - -IMPROVEMENTS: - -* Use Envoy's default for a route's validate_clusters option, which is false. This fixes a case where non-existent clusters could cause a route to no longer route to any of its backends, including existing ones. [[GH-21587](https://github.com/hashicorp/consul/issues/21587)] - -## 1.15.14 Enterprise (August 26, 2024) - -Enterprise LTS: Consul Enterprise 1.15 is a Long-Term Support (LTS) release. - -SECURITY: - -* ui: Upgrade modules with d3-color as a dependency to address denial of service issue in d3-color < 3.1.0 [[GH-21588](https://github.com/hashicorp/consul/issues/21588)] - -IMPROVEMENTS: - -* Use Envoy's default for a route's validate_clusters option, which is false. This fixes a case where non-existent clusters could cause a route to no longer route to any of its backends, including existing ones. [[GH-21587](https://github.com/hashicorp/consul/issues/21587)] - ## 1.19.1 (July 11, 2024) SECURITY: @@ -70,82 +38,6 @@ BUG FIXES: * terminating-gateway: **(Enterprise Only)** Fixed issue where enterprise metadata applied to linked services was the terminating-gateways enterprise metadata and not the linked services enterprise metadata. [[GH-21382](https://github.com/hashicorp/consul/issues/21382)] * txn: Fix a bug where mismatched Consul server versions could result in undetected data loss for when using newer Transaction verbs. 
[[GH-21519](https://github.com/hashicorp/consul/issues/21519)] -## 1.18.3 Enterprise (July 11, 2024) - -**Enterprise LTS**: Consul Enterprise 1.18 is a Long-Term Support (LTS) release. - -SECURITY: - -* Upgrade envoy module dependencies to version 1.27.7, 1.28.5 and 1.29.7 or higher to resolve [CVE-2024-39305](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-39305) [[GH-21524](https://github.com/hashicorp/consul/issues/21524)] -* Upgrade go version to 1.22.5 to address [CVE-2024-24791](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-24791) [[GH-21507](https://github.com/hashicorp/consul/issues/21507)] -* Upgrade go-retryablehttp to address [CVE-2024-6104](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-6104) [[GH-21384](https://github.com/hashicorp/consul/issues/21384)] -* agent: removed reflected cross-site scripting vulnerability [[GH-21342](https://github.com/hashicorp/consul/issues/21342)] -* ui: Pin and namespace sub-module dependencies related to the Consul UI [[GH-21378](https://github.com/hashicorp/consul/issues/21378)] - -IMPROVEMENTS: - -* mesh: update supported envoy version 1.29.4 -* mesh: update supported envoy version 1.29.5 in addition to 1.28.4, 1.27.6. [[GH-21277](https://github.com/hashicorp/consul/issues/21277)] -* upgrade go version to v1.22.3. [[GH-21113](https://github.com/hashicorp/consul/issues/21113)] -* upgrade go version to v1.22.4. [[GH-21265](https://github.com/hashicorp/consul/issues/21265)] - -BUG FIXES: - -* core: Fix multiple incorrect type conversion for potential overflows [[GH-21251](https://github.com/hashicorp/consul/issues/21251)] -* core: Fix panic runtime error on AliasCheck [[GH-21339](https://github.com/hashicorp/consul/issues/21339)] -* dns: Fixes a spam log message "Failed to parse TTL for prepared query..." - that was always being logged on each prepared query evaluation. 
[[GH-21381](https://github.com/hashicorp/consul/issues/21381)] -* terminating-gateway: **(Enterprise Only)** Fixed issue where enterprise metadata applied to linked services was the terminating-gateways enterprise metadata and not the linked services enterprise metadata. [[GH-21382](https://github.com/hashicorp/consul/issues/21382)] -* txn: Fix a bug where mismatched Consul server versions could result in undetected data loss for when using newer Transaction verbs. [[GH-21519](https://github.com/hashicorp/consul/issues/21519)] -* v2dns: Fix a regression where DNS SRV questions were returning duplicate hostnames instead of encoded IPs. - This affected Nomad integrations with Consul. [[GH-21361](https://github.com/hashicorp/consul/issues/21361)] -* v2dns: Fix a regression where DNS tags using the standard lookup syntax, `tag.name.service.consul`, were being disregarded. [[GH-21361](https://github.com/hashicorp/consul/issues/21361)] - -## 1.17.6 Enterprise (July 11, 2024) - -SECURITY: - -* Upgrade envoy module dependencies to version 1.27.7, 1.28.5 and 1.29.7 or higher to resolve [CVE-2024-39305](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-39305) [[GH-21524](https://github.com/hashicorp/consul/issues/21524)] -* Upgrade go version to 1.22.5 to address [CVE-2024-24791](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-24791) [[GH-21507](https://github.com/hashicorp/consul/issues/21507)] -* Upgrade go-retryablehttp to address [CVE-2024-6104](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-6104) [[GH-21384](https://github.com/hashicorp/consul/issues/21384)] -* agent: removed reflected cross-site scripting vulnerability [[GH-21342](https://github.com/hashicorp/consul/issues/21342)] -* ui: Pin and namespace sub-module dependencies related to the Consul UI [[GH-21378](https://github.com/hashicorp/consul/issues/21378)] - -IMPROVEMENTS: - -* upgrade go version to v1.22.3. 
[[GH-21113](https://github.com/hashicorp/consul/issues/21113)] -* upgrade go version to v1.22.4. [[GH-21265](https://github.com/hashicorp/consul/issues/21265)] - -BUG FIXES: - -* core: Fix panic runtime error on AliasCheck [[GH-21339](https://github.com/hashicorp/consul/issues/21339)] -* terminating-gateway: **(Enterprise Only)** Fixed issue where enterprise metadata applied to linked services was the terminating-gateways enterprise metadata and not the linked services enterprise metadata. [[GH-21382](https://github.com/hashicorp/consul/issues/21382)] -* txn: Fix a bug where mismatched Consul server versions could result in undetected data loss for when using newer Transaction verbs. [[GH-21519](https://github.com/hashicorp/consul/issues/21519)] - -## 1.15.13 Enterprise (July 11, 2024) - -**Enterprise LTS**: Consul Enterprise 1.15 is a Long-Term Support (LTS) release. - -SECURITY: - -* Upgrade envoy module dependencies to version 1.27.7, 1.28.5 and 1.29.7 or higher to resolve [CVE-2024-39305](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-39305) [[GH-21524](https://github.com/hashicorp/consul/issues/21524)] -* Upgrade go version to 1.22.5 to address [CVE-2024-24791](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-24791) [[GH-21507](https://github.com/hashicorp/consul/issues/21507)] -* Upgrade go-retryablehttp to address [CVE-2024-6104](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-6104) [[GH-21384](https://github.com/hashicorp/consul/issues/21384)] -* agent: removed reflected cross-site scripting vulnerability [[GH-21342](https://github.com/hashicorp/consul/issues/21342)] -* ui: Pin and namespace sub-module dependencies related to the Consul UI [[GH-21378](https://github.com/hashicorp/consul/issues/21378)] - -IMPROVEMENTS: - -* mesh: update supported envoy version 1.29.4 -* upgrade go version to v1.22.3. [[GH-21113](https://github.com/hashicorp/consul/issues/21113)] -* upgrade go version to v1.22.4. 
[[GH-21265](https://github.com/hashicorp/consul/issues/21265)] - -BUG FIXES: - -* core: Fix panic runtime error on AliasCheck [[GH-21339](https://github.com/hashicorp/consul/issues/21339)] -* terminating-gateway: **(Enterprise Only)** Fixed issue where enterprise metadata applied to linked services was the terminating-gateways enterprise metadata and not the linked services enterprise metadata. [[GH-21382](https://github.com/hashicorp/consul/issues/21382)] -* txn: Fix a bug where mismatched Consul server versions could result in undetected data loss for when using newer Transaction verbs. [[GH-21519](https://github.com/hashicorp/consul/issues/21519)] - ## 1.19.0 (June 12, 2024) BREAKING CHANGES: diff --git a/Makefile b/Makefile index b8b72d5de1983..1cd135732eab5 100644 --- a/Makefile +++ b/Makefile @@ -73,8 +73,8 @@ CONSUL_IMAGE_VERSION?=latest GOLANG_VERSION?=$(shell head -n 1 .go-version) # Takes the highest version from the ENVOY_VERSIONS file. ENVOY_VERSION?=$(shell cat envoyextensions/xdscommon/ENVOY_VERSIONS | grep '^[[:digit:]]' | sort -nr | head -n 1) -CONSUL_DATAPLANE_IMAGE := $(or $(CONSUL_DATAPLANE_IMAGE),"docker.io/hashicorppreview/consul-dataplane:1.6-dev-ubi") -DEPLOYER_CONSUL_DATAPLANE_IMAGE := $(or $(DEPLOYER_CONSUL_DATAPLANE_IMAGE), "docker.io/hashicorppreview/consul-dataplane:1.6-dev") +CONSUL_DATAPLANE_IMAGE := $(or $(CONSUL_DATAPLANE_IMAGE),"docker.io/hashicorppreview/consul-dataplane:1.3-dev-ubi") +DEPLOYER_CONSUL_DATAPLANE_IMAGE := $(or $(DEPLOYER_CONSUL_DATAPLANE_IMAGE), "docker.io/hashicorppreview/consul-dataplane:1.3-dev") CONSUL_VERSION?=$(shell cat version/VERSION) @@ -294,6 +294,7 @@ lint-container-test-deps: ## Check that the test-container module only imports a @cd test/integration/consul-container && \ $(CURDIR)/build-support/scripts/check-allowed-imports.sh \ github.com/hashicorp/consul \ + "internal/catalog/catalogtest" \ "internal/resource/resourcetest" ##@ Testing diff --git a/acl/MockAuthorizer.go b/acl/MockAuthorizer.go index 
7e41db074f5cd..e3a97ceec9bf6 100644 --- a/acl/MockAuthorizer.go +++ b/acl/MockAuthorizer.go @@ -59,6 +59,31 @@ func (m *MockAuthorizer) EventWrite(segment string, ctx *AuthorizerContext) Enfo return ret.Get(0).(EnforcementDecision) } +// IdentityRead checks for permission to read a given workload identity. +func (m *MockAuthorizer) IdentityRead(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// IdentityReadAll checks for permission to read all workload identities. +func (m *MockAuthorizer) IdentityReadAll(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + +// IdentityWrite checks for permission to create or update a given +// workload identity. +func (m *MockAuthorizer) IdentityWrite(segment string, ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(segment, ctx) + return ret.Get(0).(EnforcementDecision) +} + +// IdentityWriteAny checks for write permission on any workload identity. +func (m *MockAuthorizer) IdentityWriteAny(ctx *AuthorizerContext) EnforcementDecision { + ret := m.Called(ctx) + return ret.Get(0).(EnforcementDecision) +} + // IntentionDefaultAllow determines the default authorized behavior // when no intentions match a Connect request. 
func (m *MockAuthorizer) IntentionDefaultAllow(ctx *AuthorizerContext) EnforcementDecision { diff --git a/acl/acl_test.go b/acl/acl_test.go index 3f4c882b0e415..28542024e9567 100644 --- a/acl/acl_test.go +++ b/acl/acl_test.go @@ -40,6 +40,22 @@ func checkAllowEventWrite(t *testing.T, authz Authorizer, prefix string, entCtx require.Equal(t, Allow, authz.EventWrite(prefix, entCtx)) } +func checkAllowIdentityRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { + require.Equal(t, Allow, authz.IdentityRead(prefix, entCtx)) +} + +func checkAllowIdentityReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { + require.Equal(t, Allow, authz.IdentityReadAll(entCtx)) +} + +func checkAllowIdentityWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { + require.Equal(t, Allow, authz.IdentityWrite(prefix, entCtx)) +} + +func checkAllowIdentityWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { + require.Equal(t, Allow, authz.IdentityWriteAny(entCtx)) +} + func checkAllowIntentionDefaultAllow(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Allow, authz.IntentionDefaultAllow(entCtx)) } @@ -180,6 +196,22 @@ func checkDenyEventWrite(t *testing.T, authz Authorizer, prefix string, entCtx * require.Equal(t, Deny, authz.EventWrite(prefix, entCtx)) } +func checkDenyIdentityRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { + require.Equal(t, Deny, authz.IdentityRead(prefix, entCtx)) +} + +func checkDenyIdentityReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { + require.Equal(t, Deny, authz.IdentityReadAll(entCtx)) +} + +func checkDenyIdentityWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { + require.Equal(t, Deny, authz.IdentityWrite(prefix, entCtx)) +} + +func checkDenyIdentityWriteAny(t *testing.T, authz Authorizer, _ string, entCtx 
*AuthorizerContext) { + require.Equal(t, Deny, authz.IdentityWriteAny(entCtx)) +} + func checkDenyIntentionDefaultAllow(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Deny, authz.IntentionDefaultAllow(entCtx)) } @@ -328,6 +360,22 @@ func checkDefaultEventWrite(t *testing.T, authz Authorizer, prefix string, entCt require.Equal(t, Default, authz.EventWrite(prefix, entCtx)) } +func checkDefaultIdentityRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { + require.Equal(t, Default, authz.IdentityRead(prefix, entCtx)) +} + +func checkDefaultIdentityReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { + require.Equal(t, Default, authz.IdentityReadAll(entCtx)) +} + +func checkDefaultIdentityWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { + require.Equal(t, Default, authz.IdentityWrite(prefix, entCtx)) +} + +func checkDefaultIdentityWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) { + require.Equal(t, Default, authz.IdentityWriteAny(entCtx)) +} + func checkDefaultIntentionDefaultAllow(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) { require.Equal(t, Default, authz.IntentionDefaultAllow(entCtx)) } @@ -468,6 +516,10 @@ func TestACL(t *testing.T) { {name: "DenyIntentionDefaultAllow", check: checkDenyIntentionDefaultAllow}, {name: "DenyIntentionRead", check: checkDenyIntentionRead}, {name: "DenyIntentionWrite", check: checkDenyIntentionWrite}, + {name: "DenyIdentityRead", check: checkDenyIdentityRead}, + {name: "DenyIdentityReadAll", check: checkDenyIdentityReadAll}, + {name: "DenyIdentityWrite", check: checkDenyIdentityWrite}, + {name: "DenyIdentityWriteAny", check: checkDenyIdentityWriteAny}, {name: "DenyKeyRead", check: checkDenyKeyRead}, {name: "DenyKeyringRead", check: checkDenyKeyringRead}, {name: "DenyKeyringWrite", check: checkDenyKeyringWrite}, @@ -502,6 +554,10 @@ func TestACL(t 
*testing.T) { {name: "AllowAgentWrite", check: checkAllowAgentWrite}, {name: "AllowEventRead", check: checkAllowEventRead}, {name: "AllowEventWrite", check: checkAllowEventWrite}, + {name: "AllowIdentityRead", check: checkAllowIdentityRead}, + {name: "AllowIdentityReadAll", check: checkAllowIdentityReadAll}, + {name: "AllowIdentityWrite", check: checkAllowIdentityWrite}, + {name: "AllowIdentityWriteAny", check: checkAllowIdentityWriteAny}, {name: "AllowIntentionDefaultAllow", check: checkAllowIntentionDefaultAllow}, {name: "AllowIntentionRead", check: checkAllowIntentionRead}, {name: "AllowIntentionWrite", check: checkAllowIntentionWrite}, @@ -541,6 +597,10 @@ func TestACL(t *testing.T) { {name: "AllowAgentWrite", check: checkAllowAgentWrite}, {name: "AllowEventRead", check: checkAllowEventRead}, {name: "AllowEventWrite", check: checkAllowEventWrite}, + {name: "AllowIdentityRead", check: checkAllowIdentityRead}, + {name: "AllowIdentityReadAll", check: checkAllowIdentityReadAll}, + {name: "AllowIdentityWrite", check: checkAllowIdentityWrite}, + {name: "AllowIdentityWriteAny", check: checkAllowIdentityWriteAny}, {name: "AllowIntentionDefaultAllow", check: checkAllowIntentionDefaultAllow}, {name: "AllowIntentionRead", check: checkAllowIntentionRead}, {name: "AllowIntentionWrite", check: checkAllowIntentionWrite}, @@ -940,6 +1000,134 @@ func TestACL(t *testing.T) { {name: "ChildOverrideWriteAllowed", prefix: "override", check: checkAllowAgentWrite}, }, }, + { + name: "IdentityDefaultAllowPolicyDeny", + defaultPolicy: AllowAll(), + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Identities: []*IdentityRule{ + { + Name: "foo", + Policy: PolicyDeny, + }, + }, + IdentityPrefixes: []*IdentityRule{ + { + Name: "prefix", + Policy: PolicyDeny, + }, + }, + }, + }, + }, + checks: []aclCheck{ + {name: "IdentityFooReadDenied", prefix: "foo", check: checkDenyIdentityRead}, + {name: "IdentityFooWriteDenied", prefix: "foo", check: checkDenyIdentityWrite}, + {name: 
"IdentityPrefixReadDenied", prefix: "prefix", check: checkDenyIdentityRead}, + {name: "IdentityPrefixWriteDenied", prefix: "prefix", check: checkDenyIdentityWrite}, + {name: "IdentityBarReadAllowed", prefix: "fail", check: checkAllowIdentityRead}, + {name: "IdentityBarWriteAllowed", prefix: "fail", check: checkAllowIdentityWrite}, + }, + }, + { + name: "IdentityDefaultDenyPolicyAllow", + defaultPolicy: DenyAll(), + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Identities: []*IdentityRule{ + { + Name: "foo", + Policy: PolicyWrite, + }, + }, + IdentityPrefixes: []*IdentityRule{ + { + Name: "prefix", + Policy: PolicyRead, + }, + }, + }, + }, + }, + checks: []aclCheck{ + {name: "IdentityFooReadAllowed", prefix: "foo", check: checkAllowIdentityRead}, + {name: "IdentityFooWriteAllowed", prefix: "foo", check: checkAllowIdentityWrite}, + {name: "IdentityPrefixReadAllowed", prefix: "prefix", check: checkAllowIdentityRead}, + {name: "IdentityPrefixWriteDenied", prefix: "prefix", check: checkDenyIdentityWrite}, + {name: "IdentityBarReadDenied", prefix: "fail", check: checkDenyIdentityRead}, + {name: "IdentityBarWriteDenied", prefix: "fail", check: checkDenyIdentityWrite}, + }, + }, + { + name: "IdentityDefaultDenyPolicyComplex", + defaultPolicy: DenyAll(), + policyStack: []*Policy{ + { + PolicyRules: PolicyRules{ + Identities: []*IdentityRule{ + { + Name: "football", + Policy: PolicyRead, + }, + { + Name: "prefix-forbidden", + Policy: PolicyDeny, + Intentions: PolicyDeny, + }, + }, + IdentityPrefixes: []*IdentityRule{ + { + Name: "foo", + Policy: PolicyWrite, + Intentions: PolicyWrite, + }, + { + Name: "prefix", + Policy: PolicyRead, + Intentions: PolicyWrite, + }, + }, + }, + }, + { + PolicyRules: PolicyRules{ + Identities: []*IdentityRule{ + { + Name: "foozball", + Policy: PolicyWrite, + Intentions: PolicyRead, + }, + }, + }, + }, + }, + checks: []aclCheck{ + {name: "IdentityReadAllowed", prefix: "foo", check: checkAllowIdentityRead}, + {name: 
"IdentityWriteAllowed", prefix: "foo", check: checkAllowIdentityWrite}, + {name: "TrafficPermissionsReadAllowed", prefix: "foo", check: checkAllowTrafficPermissionsRead}, + {name: "TrafficPermissionsWriteAllowed", prefix: "foo", check: checkAllowTrafficPermissionsWrite}, + {name: "IdentityReadAllowed", prefix: "football", check: checkAllowIdentityRead}, + {name: "IdentityWriteDenied", prefix: "football", check: checkDenyIdentityWrite}, + {name: "TrafficPermissionsReadAllowed", prefix: "football", check: checkAllowTrafficPermissionsRead}, + // This might be surprising but omitting intention rule gives at most intention:read + // if we have identity:write perms. This matches services as well. + {name: "TrafficPermissionsWriteDenied", prefix: "football", check: checkDenyTrafficPermissionsWrite}, + {name: "IdentityReadAllowed", prefix: "prefix", check: checkAllowIdentityRead}, + {name: "IdentityWriteDenied", prefix: "prefix", check: checkDenyIdentityWrite}, + {name: "TrafficPermissionsReadAllowed", prefix: "prefix", check: checkAllowTrafficPermissionsRead}, + {name: "TrafficPermissionsWriteDenied", prefix: "prefix", check: checkAllowTrafficPermissionsWrite}, + {name: "IdentityReadDenied", prefix: "prefix-forbidden", check: checkDenyIdentityRead}, + {name: "IdentityWriteDenied", prefix: "prefix-forbidden", check: checkDenyIdentityWrite}, + {name: "TrafficPermissionsReadDenied", prefix: "prefix-forbidden", check: checkDenyTrafficPermissionsRead}, + {name: "TrafficPermissionsWriteDenied", prefix: "prefix-forbidden", check: checkDenyTrafficPermissionsWrite}, + {name: "IdentityReadAllowed", prefix: "foozball", check: checkAllowIdentityRead}, + {name: "IdentityWriteAllowed", prefix: "foozball", check: checkAllowIdentityWrite}, + {name: "TrafficPermissionsReadAllowed", prefix: "foozball", check: checkAllowTrafficPermissionsRead}, + {name: "TrafficPermissionsWriteDenied", prefix: "foozball", check: checkDenyTrafficPermissionsWrite}, + }, + }, { name: 
"KeyringDefaultAllowPolicyDeny", defaultPolicy: AllowAll(), diff --git a/acl/authorizer.go b/acl/authorizer.go index 937d861129dc8..39bac5f7b08b8 100644 --- a/acl/authorizer.go +++ b/acl/authorizer.go @@ -43,6 +43,7 @@ const ( ResourceACL Resource = "acl" ResourceAgent Resource = "agent" ResourceEvent Resource = "event" + ResourceIdentity Resource = "identity" ResourceIntention Resource = "intention" ResourceKey Resource = "key" ResourceKeyring Resource = "keyring" @@ -77,6 +78,19 @@ type Authorizer interface { // EventWrite determines if a specific event may be fired. EventWrite(string, *AuthorizerContext) EnforcementDecision + // IdentityRead checks for permission to read a given workload identity. + IdentityRead(string, *AuthorizerContext) EnforcementDecision + + // IdentityReadAll checks for permission to read all workload identities. + IdentityReadAll(*AuthorizerContext) EnforcementDecision + + // IdentityWrite checks for permission to create or update a given + // workload identity. + IdentityWrite(string, *AuthorizerContext) EnforcementDecision + + // IdentityWriteAny checks for write permission on any workload identity. + IdentityWriteAny(*AuthorizerContext) EnforcementDecision + // IntentionDefaultAllow determines the default authorized behavior // when no intentions match a Connect request. // @@ -253,6 +267,40 @@ func (a AllowAuthorizer) EventWriteAllowed(name string, ctx *AuthorizerContext) return nil } +// IdentityReadAllowed checks for permission to read a given workload identity, +func (a AllowAuthorizer) IdentityReadAllowed(name string, ctx *AuthorizerContext) error { + if a.Authorizer.IdentityRead(name, ctx) != Allow { + return PermissionDeniedByACL(a, ctx, ResourceIdentity, AccessRead, name) + } + return nil +} + +// IdentityReadAllAllowed checks for permission to read all workload identities. 
+func (a AllowAuthorizer) IdentityReadAllAllowed(ctx *AuthorizerContext) error { + if a.Authorizer.IdentityReadAll(ctx) != Allow { + // This is only used to gate certain UI functions right now (e.g metrics) + return PermissionDeniedByACL(a, ctx, ResourceIdentity, AccessRead, "all identities") // read + } + return nil +} + +// IdentityWriteAllowed checks for permission to create or update a given +// workload identity. +func (a AllowAuthorizer) IdentityWriteAllowed(name string, ctx *AuthorizerContext) error { + if a.Authorizer.IdentityWrite(name, ctx) != Allow { + return PermissionDeniedByACL(a, ctx, ResourceIdentity, AccessWrite, name) + } + return nil +} + +// IdentityWriteAnyAllowed checks for write permission on any workload identity +func (a AllowAuthorizer) IdentityWriteAnyAllowed(ctx *AuthorizerContext) error { + if a.Authorizer.IdentityWriteAny(ctx) != Allow { + return PermissionDeniedByACL(a, ctx, ResourceIdentity, AccessWrite, "any identity") + } + return nil +} + // IntentionReadAllowed determines if a specific intention can be read. 
func (a AllowAuthorizer) IntentionReadAllowed(name string, ctx *AuthorizerContext) error { if a.Authorizer.IntentionRead(name, ctx) != Allow { @@ -531,6 +579,13 @@ func Enforce(authz Authorizer, rsc Resource, segment string, access string, ctx case "write": return authz.EventWrite(segment, ctx), nil } + case ResourceIdentity: + switch lowerAccess { + case "read": + return authz.IdentityRead(segment, ctx), nil + case "write": + return authz.IdentityWrite(segment, ctx), nil + } case ResourceIntention: switch lowerAccess { case "read": diff --git a/acl/authorizer_test.go b/acl/authorizer_test.go index 09cba85fa6b5f..d538a04ad7152 100644 --- a/acl/authorizer_test.go +++ b/acl/authorizer_test.go @@ -188,6 +188,34 @@ func TestACL_Enforce(t *testing.T) { ret: Deny, err: "Invalid access level", }, + { + method: "IdentityRead", + resource: ResourceIdentity, + segment: "foo", + access: "read", + ret: Deny, + }, + { + method: "IdentityRead", + resource: ResourceIdentity, + segment: "foo", + access: "read", + ret: Allow, + }, + { + method: "IdentityWrite", + resource: ResourceIdentity, + segment: "foo", + access: "write", + ret: Deny, + }, + { + method: "IdentityWrite", + resource: ResourceIdentity, + segment: "foo", + access: "write", + ret: Allow, + }, { method: "IntentionRead", resource: ResourceIntention, diff --git a/acl/chained_authorizer.go b/acl/chained_authorizer.go index 15016e984928b..26f0c2dfe7fde 100644 --- a/acl/chained_authorizer.go +++ b/acl/chained_authorizer.go @@ -80,6 +80,35 @@ func (c *ChainedAuthorizer) EventWrite(name string, entCtx *AuthorizerContext) E }) } +// IdentityRead checks for permission to read a given workload identity. +func (c *ChainedAuthorizer) IdentityRead(name string, entCtx *AuthorizerContext) EnforcementDecision { + return c.executeChain(func(authz Authorizer) EnforcementDecision { + return authz.IdentityRead(name, entCtx) + }) +} + +// IdentityReadAll checks for permission to read all workload identities. 
+func (c *ChainedAuthorizer) IdentityReadAll(entCtx *AuthorizerContext) EnforcementDecision { + return c.executeChain(func(authz Authorizer) EnforcementDecision { + return authz.IdentityReadAll(entCtx) + }) +} + +// IdentityWrite checks for permission to create or update a given +// workload identity. +func (c *ChainedAuthorizer) IdentityWrite(name string, entCtx *AuthorizerContext) EnforcementDecision { + return c.executeChain(func(authz Authorizer) EnforcementDecision { + return authz.IdentityWrite(name, entCtx) + }) +} + +// IdentityWriteAny checks for write permission on any workload identity. +func (c *ChainedAuthorizer) IdentityWriteAny(entCtx *AuthorizerContext) EnforcementDecision { + return c.executeChain(func(authz Authorizer) EnforcementDecision { + return authz.IdentityWriteAny(entCtx) + }) +} + // IntentionDefaultAllow determines the default authorized behavior // when no intentions match a Connect request. func (c *ChainedAuthorizer) IntentionDefaultAllow(entCtx *AuthorizerContext) EnforcementDecision { diff --git a/acl/policy.go b/acl/policy.go index 86c9e83cfc585..0c88a9041b289 100644 --- a/acl/policy.go +++ b/acl/policy.go @@ -59,6 +59,8 @@ type PolicyRules struct { ACL string `hcl:"acl,expand"` Agents []*AgentRule `hcl:"agent,expand"` AgentPrefixes []*AgentRule `hcl:"agent_prefix,expand"` + Identities []*IdentityRule `hcl:"identity,expand"` + IdentityPrefixes []*IdentityRule `hcl:"identity_prefix,expand"` Keys []*KeyRule `hcl:"key,expand"` KeyPrefixes []*KeyRule `hcl:"key_prefix,expand"` Nodes []*NodeRule `hcl:"node,expand"` @@ -75,11 +77,6 @@ type PolicyRules struct { Operator string `hcl:"operator"` Mesh string `hcl:"mesh"` Peering string `hcl:"peering"` - - // Deprecated: exists just to track the former field for decoding - Identities []*IdentityRule `hcl:"identity,expand"` - // Deprecated: exists just to track the former field for decoding - IdentityPrefixes []*IdentityRule `hcl:"identity_prefix,expand"` } // Policy is used to represent the 
policy specified by an ACL configuration. @@ -96,8 +93,6 @@ type AgentRule struct { } // IdentityRule represents a policy for a workload identity -// -// Deprecated: exists just to track the former field for decoding type IdentityRule struct { Name string `hcl:",key"` Policy string @@ -188,9 +183,29 @@ func (pr *PolicyRules) Validate(conf *Config) error { } } - // Identity rules are deprecated, zero them out. - pr.Identities = nil - pr.IdentityPrefixes = nil + // Validate the identity policies + for _, id := range pr.Identities { + if !isPolicyValid(id.Policy, false) { + return fmt.Errorf("Invalid identity policy: %#v", id) + } + if id.Intentions != "" && !isPolicyValid(id.Intentions, false) { + return fmt.Errorf("Invalid identity intentions policy: %#v", id) + } + if err := id.EnterpriseRule.Validate(id.Policy, conf); err != nil { + return fmt.Errorf("Invalid identity enterprise policy: %#v, got error: %v", id, err) + } + } + for _, id := range pr.IdentityPrefixes { + if !isPolicyValid(id.Policy, false) { + return fmt.Errorf("Invalid identity_prefix policy: %#v", id) + } + if id.Intentions != "" && !isPolicyValid(id.Intentions, false) { + return fmt.Errorf("Invalid identity_prefix intentions policy: %#v", id) + } + if err := id.EnterpriseRule.Validate(id.Policy, conf); err != nil { + return fmt.Errorf("Invalid identity_prefix enterprise policy: %#v, got error: %v", id, err) + } + } // Validate the key policy for _, kp := range pr.Keys { diff --git a/acl/policy_authorizer.go b/acl/policy_authorizer.go index 16ffa743f95e0..11d19609efde9 100644 --- a/acl/policy_authorizer.go +++ b/acl/policy_authorizer.go @@ -14,6 +14,9 @@ type policyAuthorizer struct { // agentRules contain the exact-match agent policies agentRules *radix.Tree + // identityRules contains the identity exact-match policies + identityRules *radix.Tree + // intentionRules contains the service intention exact-match policies intentionRules *radix.Tree @@ -183,6 +186,48 @@ func (p *policyAuthorizer) 
loadRules(policy *PolicyRules) error { } } + // Load the identity policy (exact matches) + for _, id := range policy.Identities { + if err := insertPolicyIntoRadix(id.Name, id.Policy, &id.EnterpriseRule, p.identityRules, false); err != nil { + return err + } + + intention := id.Intentions + if intention == "" { + switch id.Policy { + case PolicyRead, PolicyWrite: + intention = PolicyRead + default: + intention = PolicyDeny + } + } + + if err := insertPolicyIntoRadix(id.Name, intention, &id.EnterpriseRule, p.trafficPermissionsRules, false); err != nil { + return err + } + } + + // Load the identity policy (prefix matches) + for _, id := range policy.IdentityPrefixes { + if err := insertPolicyIntoRadix(id.Name, id.Policy, &id.EnterpriseRule, p.identityRules, true); err != nil { + return err + } + + intention := id.Intentions + if intention == "" { + switch id.Policy { + case PolicyRead, PolicyWrite: + intention = PolicyRead + default: + intention = PolicyDeny + } + } + + if err := insertPolicyIntoRadix(id.Name, intention, &id.EnterpriseRule, p.trafficPermissionsRules, true); err != nil { + return err + } + } + // Load the key policy (exact matches) for _, kp := range policy.Keys { if err := insertPolicyIntoRadix(kp.Prefix, kp.Policy, &kp.EnterpriseRule, p.keyRules, false); err != nil { @@ -352,6 +397,7 @@ func newPolicyAuthorizer(policies []*Policy, ent *Config) (*policyAuthorizer, er func newPolicyAuthorizerFromRules(rules *PolicyRules, ent *Config) (*policyAuthorizer, error) { p := &policyAuthorizer{ agentRules: radix.New(), + identityRules: radix.New(), intentionRules: radix.New(), trafficPermissionsRules: radix.New(), keyRules: radix.New(), @@ -532,6 +578,33 @@ func (p *policyAuthorizer) EventWrite(name string, _ *AuthorizerContext) Enforce return Default } +// IdentityRead checks for permission to read a given workload identity. 
+func (p *policyAuthorizer) IdentityRead(name string, _ *AuthorizerContext) EnforcementDecision { + if rule, ok := getPolicy(name, p.identityRules); ok { + return enforce(rule.access, AccessRead) + } + return Default +} + +// IdentityReadAll checks for permission to read all workload identities. +func (p *policyAuthorizer) IdentityReadAll(_ *AuthorizerContext) EnforcementDecision { + return p.allAllowed(p.identityRules, AccessRead) +} + +// IdentityWrite checks for permission to create or update a given +// workload identity. +func (p *policyAuthorizer) IdentityWrite(name string, _ *AuthorizerContext) EnforcementDecision { + if rule, ok := getPolicy(name, p.identityRules); ok { + return enforce(rule.access, AccessWrite) + } + return Default +} + +// IdentityWriteAny checks for write permission on any workload identity. +func (p *policyAuthorizer) IdentityWriteAny(_ *AuthorizerContext) EnforcementDecision { + return p.anyAllowed(p.identityRules, AccessWrite) +} + // IntentionDefaultAllow returns whether the default behavior when there are // no matching intentions is to allow or deny. 
func (p *policyAuthorizer) IntentionDefaultAllow(_ *AuthorizerContext) EnforcementDecision { diff --git a/acl/policy_authorizer_test.go b/acl/policy_authorizer_test.go index a2f9b929f10f1..96272d8b12f4b 100644 --- a/acl/policy_authorizer_test.go +++ b/acl/policy_authorizer_test.go @@ -41,6 +41,9 @@ func TestPolicyAuthorizer(t *testing.T) { {name: "DefaultAgentWrite", prefix: "foo", check: checkDefaultAgentWrite}, {name: "DefaultEventRead", prefix: "foo", check: checkDefaultEventRead}, {name: "DefaultEventWrite", prefix: "foo", check: checkDefaultEventWrite}, + {name: "DefaultIdentityRead", prefix: "foo", check: checkDefaultIdentityRead}, + {name: "DefaultIdentityWrite", prefix: "foo", check: checkDefaultIdentityWrite}, + {name: "DefaultIdentityWriteAny", prefix: "", check: checkDefaultIdentityWriteAny}, {name: "DefaultIntentionDefaultAllow", prefix: "foo", check: checkDefaultIntentionDefaultAllow}, {name: "DefaultIntentionRead", prefix: "foo", check: checkDefaultIntentionRead}, {name: "DefaultIntentionWrite", prefix: "foo", check: checkDefaultIntentionWrite}, @@ -187,6 +190,29 @@ func TestPolicyAuthorizer(t *testing.T) { Policy: PolicyRead, }, }, + Identities: []*IdentityRule{ + { + Name: "foo", + Policy: PolicyWrite, + Intentions: PolicyWrite, + }, + { + Name: "football", + Policy: PolicyDeny, + }, + }, + IdentityPrefixes: []*IdentityRule{ + { + Name: "foot", + Policy: PolicyRead, + Intentions: PolicyRead, + }, + { + Name: "fo", + Policy: PolicyRead, + Intentions: PolicyRead, + }, + }, Keys: []*KeyRule{ { Prefix: "foo", @@ -374,6 +400,22 @@ func TestPolicyAuthorizer(t *testing.T) { {name: "ServiceWriteAnyAllowed", prefix: "", check: checkAllowServiceWriteAny}, {name: "ServiceReadWithinPrefixDenied", prefix: "foot", check: checkDenyServiceReadPrefix}, + {name: "IdentityReadPrefixAllowed", prefix: "fo", check: checkAllowIdentityRead}, + {name: "IdentityWritePrefixDenied", prefix: "fo", check: checkDenyIdentityWrite}, + {name: "IdentityReadPrefixAllowed", prefix: 
"for", check: checkAllowIdentityRead}, + {name: "IdentityWritePrefixDenied", prefix: "for", check: checkDenyIdentityWrite}, + {name: "IdentityReadAllowed", prefix: "foo", check: checkAllowIdentityRead}, + {name: "IdentityWriteAllowed", prefix: "foo", check: checkAllowIdentityWrite}, + {name: "IdentityReadPrefixAllowed", prefix: "foot", check: checkAllowIdentityRead}, + {name: "IdentityWritePrefixDenied", prefix: "foot", check: checkDenyIdentityWrite}, + {name: "IdentityReadPrefixAllowed", prefix: "foot2", check: checkAllowIdentityRead}, + {name: "IdentityWritePrefixDenied", prefix: "foot2", check: checkDenyIdentityWrite}, + {name: "IdentityReadPrefixAllowed", prefix: "food", check: checkAllowIdentityRead}, + {name: "IdentityWritePrefixDenied", prefix: "food", check: checkDenyIdentityWrite}, + {name: "IdentityReadDenied", prefix: "football", check: checkDenyIdentityRead}, + {name: "IdentityWriteDenied", prefix: "football", check: checkDenyIdentityWrite}, + {name: "IdentityWriteAnyAllowed", prefix: "", check: checkAllowIdentityWriteAny}, + {name: "IntentionReadPrefixAllowed", prefix: "fo", check: checkAllowIntentionRead}, {name: "IntentionWritePrefixDenied", prefix: "fo", check: checkDenyIntentionWrite}, {name: "IntentionReadPrefixAllowed", prefix: "for", check: checkAllowIntentionRead}, diff --git a/acl/policy_test.go b/acl/policy_test.go index 2ce0b32892fbe..599c8c977e1b9 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -42,6 +42,12 @@ func TestPolicySourceParse(t *testing.T) { event "bar" { policy = "deny" } + identity_prefix "" { + policy = "write" + } + identity "foo" { + policy = "read" + } key_prefix "" { policy = "read" } @@ -117,6 +123,16 @@ func TestPolicySourceParse(t *testing.T) { "policy": "deny" } }, + "identity_prefix": { + "": { + "policy": "write" + } + }, + "identity": { + "foo": { + "policy": "read" + } + }, "key_prefix": { "": { "policy": "read" @@ -217,6 +233,18 @@ func TestPolicySourceParse(t *testing.T) { Policy: PolicyDeny, }, }, + 
IdentityPrefixes: []*IdentityRule{ + { + Name: "", + Policy: PolicyWrite, + }, + }, + Identities: []*IdentityRule{ + { + Name: "foo", + Policy: PolicyRead, + }, + }, Keyring: PolicyDeny, KeyPrefixes: []*KeyRule{ { @@ -303,6 +331,39 @@ func TestPolicySourceParse(t *testing.T) { }, }}, }, + { + Name: "Identity No Intentions", + Rules: `identity "foo" { policy = "write" }`, + RulesJSON: `{ "identity": { "foo": { "policy": "write" }}}`, + Expected: &Policy{PolicyRules: PolicyRules{ + Identities: []*IdentityRule{ + { + Name: "foo", + Policy: "write", + }, + }, + }}, + }, + { + Name: "Identity Intentions", + Rules: `identity "foo" { policy = "write" intentions = "read" }`, + RulesJSON: `{ "identity": { "foo": { "policy": "write", "intentions": "read" }}}`, + Expected: &Policy{PolicyRules: PolicyRules{ + Identities: []*IdentityRule{ + { + Name: "foo", + Policy: "write", + Intentions: "read", + }, + }, + }}, + }, + { + Name: "Identity Intention: invalid value", + Rules: `identity "foo" { policy = "write" intentions = "foo" }`, + RulesJSON: `{ "identity": { "foo": { "policy": "write", "intentions": "foo" }}}`, + Err: "Invalid identity intentions policy", + }, { Name: "Service No Intentions", Rules: `service "foo" { policy = "write" }`, @@ -354,6 +415,18 @@ func TestPolicySourceParse(t *testing.T) { RulesJSON: `{ "agent_prefix": { "foo": { "policy": "nope" }}}`, Err: "Invalid agent_prefix policy", }, + { + Name: "Bad Policy - Identity", + Rules: `identity "foo" { policy = "nope" }`, + RulesJSON: `{ "identity": { "foo": { "policy": "nope" }}}`, + Err: "Invalid identity policy", + }, + { + Name: "Bad Policy - Identity Prefix", + Rules: `identity_prefix "foo" { policy = "nope" }`, + RulesJSON: `{ "identity_prefix": { "foo": { "policy": "nope" }}}`, + Err: "Invalid identity_prefix policy", + }, { Name: "Bad Policy - Key", Rules: `key "foo" { policy = "nope" }`, @@ -685,6 +758,109 @@ func TestMergePolicies(t *testing.T) { }, }}, }, + { + name: "Identities", + input: []*Policy{ + 
{PolicyRules: PolicyRules{ + Identities: []*IdentityRule{ + { + Name: "foo", + Policy: PolicyWrite, + Intentions: PolicyWrite, + }, + { + Name: "bar", + Policy: PolicyRead, + Intentions: PolicyRead, + }, + { + Name: "baz", + Policy: PolicyWrite, + Intentions: PolicyWrite, + }, + }, + IdentityPrefixes: []*IdentityRule{ + { + Name: "000", + Policy: PolicyWrite, + Intentions: PolicyWrite, + }, + { + Name: "111", + Policy: PolicyRead, + Intentions: PolicyRead, + }, + { + Name: "222", + Policy: PolicyWrite, + Intentions: PolicyWrite, + }, + }, + }}, + {PolicyRules: PolicyRules{ + Identities: []*IdentityRule{ + { + Name: "foo", + Policy: PolicyRead, + Intentions: PolicyRead, + }, + { + Name: "baz", + Policy: PolicyDeny, + Intentions: PolicyDeny, + }, + }, + IdentityPrefixes: []*IdentityRule{ + { + Name: "000", + Policy: PolicyRead, + Intentions: PolicyRead, + }, + { + Name: "222", + Policy: PolicyDeny, + Intentions: PolicyDeny, + }, + }, + }}, + }, + expected: &Policy{PolicyRules: PolicyRules{ + Identities: []*IdentityRule{ + { + Name: "foo", + Policy: PolicyWrite, + Intentions: PolicyWrite, + }, + { + Name: "bar", + Policy: PolicyRead, + Intentions: PolicyRead, + }, + { + Name: "baz", + Policy: PolicyDeny, + Intentions: PolicyDeny, + }, + }, + IdentityPrefixes: []*IdentityRule{ + { + Name: "000", + Policy: PolicyWrite, + Intentions: PolicyWrite, + }, + { + Name: "111", + Policy: PolicyRead, + Intentions: PolicyRead, + }, + { + Name: "222", + Policy: PolicyDeny, + Intentions: PolicyDeny, + }, + }, + }}, + }, { name: "Node", input: []*Policy{ diff --git a/agent/acl_endpoint_test.go b/agent/acl_endpoint_test.go index 7b484e092b5bb..060f1f03eff47 100644 --- a/agent/acl_endpoint_test.go +++ b/agent/acl_endpoint_test.go @@ -15,9 +15,8 @@ import ( "time" "github.com/go-jose/go-jose/v3/jwt" - "github.com/stretchr/testify/require" - "github.com/hashicorp/go-uuid" + "github.com/stretchr/testify/require" "github.com/hashicorp/consul/acl" 
"github.com/hashicorp/consul/agent/consul/authmethod/testauth" @@ -1408,7 +1407,7 @@ func TestACL_HTTP(t *testing.T) { var list map[string]api.ACLTemplatedPolicyResponse require.NoError(t, json.NewDecoder(resp.Body).Decode(&list)) - require.Len(t, list, 6) + require.Len(t, list, 7) require.Equal(t, api.ACLTemplatedPolicyResponse{ TemplateName: api.ACLTemplatedPolicyServiceName, @@ -2226,7 +2225,7 @@ func TestACL_Authorize(t *testing.T) { policyReq := structs.ACLPolicySetRequest{ Policy: structs.ACLPolicy{ Name: "test", - Rules: `acl = "read" operator = "write" service_prefix "" { policy = "read"} node_prefix "" { policy= "write" } key_prefix "/foo" { policy = "write" } `, + Rules: `acl = "read" operator = "write" identity_prefix "" { policy = "read"} service_prefix "" { policy = "read"} node_prefix "" { policy= "write" } key_prefix "/foo" { policy = "write" } `, }, Datacenter: "dc1", WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken}, @@ -2312,6 +2311,16 @@ func TestACL_Authorize(t *testing.T) { Segment: "foo", Access: "write", }, + { + Resource: "identity", + Segment: "foo", + Access: "read", + }, + { + Resource: "identity", + Segment: "foo", + Access: "write", + }, { Resource: "intention", Segment: "foo", @@ -2462,6 +2471,16 @@ func TestACL_Authorize(t *testing.T) { Segment: "foo", Access: "write", }, + { + Resource: "identity", + Segment: "foo", + Access: "read", + }, + { + Resource: "identity", + Segment: "foo", + Access: "write", + }, { Resource: "intention", Segment: "foo", @@ -2568,6 +2587,8 @@ func TestACL_Authorize(t *testing.T) { false, // agent:write false, // event:read false, // event:write + true, // identity:read + false, // identity:write true, // intentions:read false, // intention:write false, // key:read diff --git a/agent/ae/ae.go b/agent/ae/ae.go index 94db2a7cf036d..f8b9a331d100c 100644 --- a/agent/ae/ae.go +++ b/agent/ae/ae.go @@ -152,6 +152,14 @@ const ( retryFullSyncState fsmState = "retryFullSync" ) +// 
HardDisableSync is like PauseSync but is one-way. It causes other +// Pause/Resume/Start operations to be completely ignored. +func (s *StateSyncer) HardDisableSync() { + s.pauseLock.Lock() + s.hardDisabled = true + s.pauseLock.Unlock() +} + // Run is the long running method to perform state synchronization // between local and remote servers. func (s *StateSyncer) Run() { diff --git a/agent/agent.go b/agent/agent.go index a509ea2b34459..a3916922df2fe 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -69,6 +69,7 @@ import ( "github.com/hashicorp/consul/api/watch" libdns "github.com/hashicorp/consul/internal/dnsutil" "github.com/hashicorp/consul/internal/gossip/librtt" + proxytracker "github.com/hashicorp/consul/internal/mesh/proxy-tracker" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib/file" @@ -638,6 +639,9 @@ func (a *Agent) Start(ctx context.Context) error { // create the state synchronization manager which performs // regular and on-demand state synchronizations (anti-entropy). a.sync = ae.NewStateSyncer(a.State, c.AEInterval, a.shutdownCh, a.logger) + if a.baseDeps.UseV2Resources() { + a.sync.HardDisableSync() + } err = validateFIPSConfig(a.config) if err != nil { @@ -669,6 +673,10 @@ func (a *Agent) Start(ctx context.Context) error { return fmt.Errorf("failed to start Consul enterprise component: %v", err) } + // proxyTracker will be used in the creation of the XDS server and also + // in the registration of the v2 xds controller + var proxyTracker *proxytracker.ProxyTracker + // Setup either the client or the server. 
var consulServer *consul.Server if c.ServerMode { @@ -708,7 +716,13 @@ func (a *Agent) Start(ctx context.Context) error { nil, ) - consulServer, err = consul.NewServer(consulCfg, a.baseDeps.Deps, a.externalGRPCServer, incomingRPCLimiter, serverLogger) + if a.baseDeps.UseV2Resources() { + proxyTracker = proxytracker.NewProxyTracker(proxytracker.ProxyTrackerConfig{ + Logger: a.logger.Named("proxy-tracker"), + SessionLimiter: a.baseDeps.XDSStreamLimiter, + }) + } + consulServer, err = consul.NewServer(consulCfg, a.baseDeps.Deps, a.externalGRPCServer, incomingRPCLimiter, serverLogger, proxyTracker) if err != nil { return fmt.Errorf("Failed to start Consul server: %v", err) } @@ -732,6 +746,10 @@ func (a *Agent) Start(ctx context.Context) error { } } } else { + if a.baseDeps.UseV2Resources() { + return fmt.Errorf("can't start agent: client agents are not supported with v2 resources") + } + // the conn is used to connect to the consul server agent conn, err := a.baseDeps.GRPCConnPool.ClientConn(a.baseDeps.RuntimeConfig.Datacenter) if err != nil { @@ -877,7 +895,7 @@ func (a *Agent) Start(ctx context.Context) error { } // Start grpc and grpc_tls servers. - if err := a.listenAndServeGRPC(consulServer); err != nil { + if err := a.listenAndServeGRPC(proxyTracker, consulServer); err != nil { return err } @@ -938,20 +956,28 @@ func (a *Agent) configureXDSServer(proxyWatcher xds.ProxyWatcher, server *consul // TODO(agentless): rather than asserting the concrete type of delegate, we // should add a method to the Delegate interface to build a ConfigSource. 
if server != nil { - catalogCfg := catalogproxycfg.NewConfigSource(catalogproxycfg.Config{ - NodeName: a.config.NodeName, - LocalState: a.State, - LocalConfigSource: proxyWatcher, - Manager: a.proxyConfig, - GetStore: func() catalogproxycfg.Store { return server.FSM().State() }, - Logger: a.proxyConfig.Logger.Named("server-catalog"), - SessionLimiter: a.baseDeps.XDSStreamLimiter, - }) - go func() { - <-a.shutdownCh - catalogCfg.Shutdown() - }() - proxyWatcher = catalogCfg + switch proxyWatcher.(type) { + case *proxytracker.ProxyTracker: + go func() { + <-a.shutdownCh + proxyWatcher.(*proxytracker.ProxyTracker).Shutdown() + }() + default: + catalogCfg := catalogproxycfg.NewConfigSource(catalogproxycfg.Config{ + NodeName: a.config.NodeName, + LocalState: a.State, + LocalConfigSource: proxyWatcher, + Manager: a.proxyConfig, + GetStore: func() catalogproxycfg.Store { return server.FSM().State() }, + Logger: a.proxyConfig.Logger.Named("server-catalog"), + SessionLimiter: a.baseDeps.XDSStreamLimiter, + }) + go func() { + <-a.shutdownCh + catalogCfg.Shutdown() + }() + proxyWatcher = catalogCfg + } } a.xdsServer = xds.NewServer( a.config.NodeName, @@ -965,11 +991,16 @@ func (a *Agent) configureXDSServer(proxyWatcher xds.ProxyWatcher, server *consul a.xdsServer.Register(a.externalGRPCServer) } -func (a *Agent) listenAndServeGRPC(server *consul.Server) error { +func (a *Agent) listenAndServeGRPC(proxyTracker *proxytracker.ProxyTracker, server *consul.Server) error { if len(a.config.GRPCAddrs) < 1 && len(a.config.GRPCTLSAddrs) < 1 { return nil } - var proxyWatcher xds.ProxyWatcher = localproxycfg.NewConfigSource(a.proxyConfig) + var proxyWatcher xds.ProxyWatcher + if a.baseDeps.UseV2Resources() { + proxyWatcher = proxyTracker + } else { + proxyWatcher = localproxycfg.NewConfigSource(a.proxyConfig) + } a.configureXDSServer(proxyWatcher, server) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 69551d7c36469..9fd76fae4fa01 100644 --- 
a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -21,15 +21,14 @@ import ( "time" "github.com/armon/go-metrics" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/serf/serf" "github.com/mitchellh/hashstructure" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/time/rate" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/serf/serf" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl/resolver" "github.com/hashicorp/consul/agent/config" @@ -80,6 +79,46 @@ func createACLTokenWithAgentReadPolicy(t *testing.T, srv *HTTPHandlers) string { return svcToken.SecretID } +func TestAgentEndpointsFailInV2(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t, `experiments = ["resource-apis"]`) + + checkRequest := func(method, url string) { + t.Run(method+" "+url, func(t *testing.T) { + assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, `{}`) + }) + } + + t.Run("agent-self-with-params", func(t *testing.T) { + req, err := http.NewRequest("GET", "/v1/agent/self?dc=dc1", nil) + require.NoError(t, err) + + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + _, err = io.ReadAll(resp.Body) + require.NoError(t, err) + }) + + checkRequest("PUT", "/v1/agent/maintenance") + checkRequest("GET", "/v1/agent/services") + checkRequest("GET", "/v1/agent/service/web") + checkRequest("GET", "/v1/agent/checks") + checkRequest("GET", "/v1/agent/health/service/id/web") + checkRequest("GET", "/v1/agent/health/service/name/web") + checkRequest("PUT", "/v1/agent/check/register") + checkRequest("PUT", "/v1/agent/check/deregister/web") + checkRequest("PUT", "/v1/agent/check/pass/web") + checkRequest("PUT", "/v1/agent/check/warn/web") + checkRequest("PUT", "/v1/agent/check/fail/web") + checkRequest("PUT", "/v1/agent/check/update/web") + checkRequest("PUT", "/v1/agent/service/register") 
+ checkRequest("PUT", "/v1/agent/service/deregister/web") + checkRequest("PUT", "/v1/agent/service/maintenance/web") +} + func TestAgent_Services(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -1621,7 +1660,6 @@ func newDefaultBaseDeps(t *testing.T) BaseDeps { } func TestHTTPHandlers_AgentMetricsStream_ACLDeny(t *testing.T) { - t.Skip("this test panics without a license manager in enterprise") bd := newDefaultBaseDeps(t) bd.Tokens = new(tokenStore.Store) sink := metrics.NewInmemSink(30*time.Millisecond, time.Second) @@ -1653,7 +1691,6 @@ func TestHTTPHandlers_AgentMetricsStream_ACLDeny(t *testing.T) { } func TestHTTPHandlers_AgentMetricsStream(t *testing.T) { - t.Skip("this test panics without a license manager in enterprise") bd := newDefaultBaseDeps(t) bd.Tokens = new(tokenStore.Store) sink := metrics.NewInmemSink(20*time.Millisecond, time.Second) diff --git a/agent/catalog_endpoint_test.go b/agent/catalog_endpoint_test.go index c06efc748cb64..4aafc8a029043 100644 --- a/agent/catalog_endpoint_test.go +++ b/agent/catalog_endpoint_test.go @@ -6,17 +6,18 @@ package agent import ( "context" "fmt" + "io" "net/http" "net/http/httptest" "net/url" + "strings" "testing" "time" + "github.com/hashicorp/serf/coordinate" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/hashicorp/serf/coordinate" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" @@ -30,6 +31,49 @@ func addQueryParam(req *http.Request, param, value string) { req.URL.RawQuery = q.Encode() } +func TestCatalogEndpointsFailInV2(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t, `experiments = ["resource-apis"]`) + + checkRequest := func(method, url string) { + t.Run(method+" "+url, func(t *testing.T) { + assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, "{}") + }) + } + + checkRequest("PUT", "/v1/catalog/register") + checkRequest("GET", "/v1/catalog/connect/") + 
checkRequest("PUT", "/v1/catalog/deregister") + checkRequest("GET", "/v1/catalog/datacenters") + checkRequest("GET", "/v1/catalog/nodes") + checkRequest("GET", "/v1/catalog/services") + checkRequest("GET", "/v1/catalog/service/") + checkRequest("GET", "/v1/catalog/node/") + checkRequest("GET", "/v1/catalog/node-services/") + checkRequest("GET", "/v1/catalog/gateway-services/") +} + +func assertV1CatalogEndpointDoesNotWorkWithV2(t *testing.T, a *TestAgent, method, url string, requestBody string) { + var body io.Reader + switch method { + case http.MethodPost, http.MethodPut: + body = strings.NewReader(requestBody + "\n") + } + + req, err := http.NewRequest(method, url, body) + require.NoError(t, err) + + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + + got, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + require.Contains(t, string(got), structs.ErrUsingV2CatalogExperiment.Error()) +} + func TestCatalogRegister_PeeringRegistration(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") diff --git a/agent/config/builder.go b/agent/config/builder.go index f8d0df7b5c93f..64e9120fdec77 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -22,13 +22,12 @@ import ( "time" "github.com/armon/go-metrics/prometheus" - "golang.org/x/time/rate" - "github.com/hashicorp/go-bexpr" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-sockaddr/template" "github.com/hashicorp/memberlist" + "golang.org/x/time/rate" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/checks" @@ -775,7 +774,6 @@ func (b *builder) build() (rt RuntimeConfig, err error) { if err != nil { return RuntimeConfig{}, fmt.Errorf("config_entries.bootstrap[%d]: %s", i, err) } - // Ensure Normalize is called before Validate for accurate validation if err := entry.Normalize(); err != nil { return RuntimeConfig{}, 
fmt.Errorf("config_entries.bootstrap[%d]: %s", i, err) } @@ -1144,6 +1142,23 @@ func (b *builder) build() (rt RuntimeConfig, err error) { return RuntimeConfig{}, fmt.Errorf("cache.entry_fetch_rate must be strictly positive, was: %v", rt.Cache.EntryFetchRate) } + // TODO(CC-6389): Remove once resource-apis is no longer considered experimental and is supported by HCP + if stringslice.Contains(rt.Experiments, consul.CatalogResourceExperimentName) && rt.IsCloudEnabled() { + // Allow override of this check for development/testing purposes. Should not be used in production + if !stringslice.Contains(rt.Experiments, consul.HCPAllowV2ResourceAPIs) { + return RuntimeConfig{}, fmt.Errorf("`experiments` cannot include 'resource-apis' when HCP `cloud` configuration is set") + } + } + + // For now, disallow usage of several v2 experiments in secondary datacenters. + if rt.ServerMode && rt.PrimaryDatacenter != rt.Datacenter { + for _, name := range rt.Experiments { + if !consul.IsExperimentAllowedOnSecondaries(name) { + return RuntimeConfig{}, fmt.Errorf("`experiments` cannot include `%s` for servers in secondary datacenters", name) + } + } + } + if rt.UIConfig.MetricsProvider == "prometheus" { // Handle defaulting for the built-in version of prometheus. 
if len(rt.UIConfig.MetricsProxy.PathAllowlist) == 0 { diff --git a/agent/config/builder_test.go b/agent/config/builder_test.go index 26d20bdfbacc2..a587dec132f3c 100644 --- a/agent/config/builder_test.go +++ b/agent/config/builder_test.go @@ -615,9 +615,23 @@ func TestBuilder_CheckExperimentsInSecondaryDatacenters(t *testing.T) { "primary server no experiments": { hcl: primary + `experiments = []`, }, + "primary server v2catalog": { + hcl: primary + `experiments = ["resource-apis"]`, + }, + "primary server v2tenancy": { + hcl: primary + `experiments = ["v2tenancy"]`, + }, "secondary server no experiments": { hcl: secondary + `experiments = []`, }, + "secondary server v2catalog": { + hcl: secondary + `experiments = ["resource-apis"]`, + expectErr: true, + }, + "secondary server v2tenancy": { + hcl: secondary + `experiments = ["v2tenancy"]`, + expectErr: true, + }, } for name, tc := range cases { @@ -627,6 +641,67 @@ func TestBuilder_CheckExperimentsInSecondaryDatacenters(t *testing.T) { } } +func TestBuilder_WarnCloudConfigWithResourceApis(t *testing.T) { + tests := []struct { + name string + hcl string + expectErr bool + }{ + { + name: "base_case", + hcl: ``, + }, + { + name: "resource-apis_no_cloud", + hcl: `experiments = ["resource-apis"]`, + }, + { + name: "cloud-config_no_experiments", + hcl: `cloud{ resource_id = "abc" client_id = "abc" client_secret = "abc"}`, + }, + { + name: "cloud-config_resource-apis_experiment", + hcl: ` + experiments = ["resource-apis"] + cloud{ resource_id = "abc" client_id = "abc" client_secret = "abc"}`, + expectErr: true, + }, + { + name: "cloud-config_other_experiment", + hcl: ` + experiments = ["test"] + cloud{ resource_id = "abc" client_id = "abc" client_secret = "abc"}`, + }, + { + name: "cloud-config_resource-apis_experiment_override", + hcl: ` + experiments = ["resource-apis", "hcp-v2-resource-apis"] + cloud{ resource_id = "abc" client_id = "abc" client_secret = "abc"}`, + }, + } + for _, tc := range tests { + // using dev 
mode skips the need for a data dir + devMode := true + builderOpts := LoadOpts{ + DevMode: &devMode, + Overrides: []Source{ + FileSource{ + Name: "overrides", + Format: "hcl", + Data: tc.hcl, + }, + }, + } + _, err := Load(builderOpts) + if tc.expectErr { + require.Error(t, err) + require.Contains(t, err.Error(), "cannot include 'resource-apis' when HCP") + } else { + require.NoError(t, err) + } + } +} + func TestBuilder_CloudConfigWithEnvironmentVars(t *testing.T) { tests := map[string]struct { hcl string diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index a36d5f67dad93..257f320a55c18 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -6015,6 +6015,24 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { rt.RaftLogStoreConfig.WAL.SegmentSize = 64 * 1024 * 1024 }, }) + run(t, testCase{ + desc: "logstore defaults", + args: []string{ + `-data-dir=` + dataDir, + }, + json: []string{` + { + "experiments": ["resource-apis"] + } + `}, + hcl: []string{`experiments=["resource-apis"]`}, + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.RaftLogStoreConfig.Backend = consul.LogStoreBackendDefault + rt.RaftLogStoreConfig.WAL.SegmentSize = 64 * 1024 * 1024 + rt.Experiments = []string{"resource-apis"} + }, + }) run(t, testCase{ // this was a bug in the initial config commit. Specifying part of this // stanza should still result in sensible defaults for the other parts. 
diff --git a/agent/config_endpoint_test.go b/agent/config_endpoint_test.go index 42de720c7993b..8697b55e5bf0e 100644 --- a/agent/config_endpoint_test.go +++ b/agent/config_endpoint_test.go @@ -20,6 +20,23 @@ import ( "github.com/hashicorp/consul/testrpc" ) +func TestConfigEndpointsFailInV2(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t, `experiments = ["resource-apis"]`) + + checkRequest := func(method, url string) { + t.Run(method+" "+url, func(t *testing.T) { + assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, `{"kind":"service-defaults", "name":"web"}`) + }) + } + + checkRequest("GET", "/v1/config/service-defaults") + checkRequest("GET", "/v1/config/service-defaults/web") + checkRequest("DELETE", "/v1/config/service-defaults/web") + checkRequest("PUT", "/v1/config") +} + func TestConfig_Get(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -595,7 +612,7 @@ func TestConfig_Apply_CAS(t *testing.T) { { "Kind": "service-defaults", "Name": "foo", - "Protocol": "http" + "Protocol": "udp" } `)) req, _ = http.NewRequest("PUT", "/v1/config?cas=0", body) @@ -611,7 +628,7 @@ func TestConfig_Apply_CAS(t *testing.T) { { "Kind": "service-defaults", "Name": "foo", - "Protocol": "http" + "Protocol": "udp" } `)) req, _ = http.NewRequest("PUT", fmt.Sprintf("/v1/config?cas=%d", entry.GetRaftIndex().ModifyIndex), body) diff --git a/agent/connect/uri.go b/agent/connect/uri.go index d9d5aa037d8ae..bc898f78654f4 100644 --- a/agent/connect/uri.go +++ b/agent/connect/uri.go @@ -23,6 +23,8 @@ type CertURI interface { } var ( + spiffeIDWorkloadIdentityRegexp = regexp.MustCompile( + `^(?:/ap/([^/]+))/ns/([^/]+)/identity/([^/]+)$`) spiffeIDServiceRegexp = regexp.MustCompile( `^(?:/ap/([^/]+))?/ns/([^/]+)/dc/([^/]+)/svc/([^/]+)$`) spiffeIDAgentRegexp = regexp.MustCompile( @@ -94,6 +96,32 @@ func ParseCertURI(input *url.URL) (CertURI, error) { Datacenter: dc, Service: service, }, nil + } else if v := 
spiffeIDWorkloadIdentityRegexp.FindStringSubmatch(path); v != nil { + // Determine the values. We assume they're reasonable to save cycles, + // but if the raw path is not empty that means that something is + // URL encoded so we go to the slow path. + ap := v[1] + ns := v[2] + workloadIdentity := v[3] + if input.RawPath != "" { + var err error + if ap, err = url.PathUnescape(v[1]); err != nil { + return nil, fmt.Errorf("Invalid admin partition: %s", err) + } + if ns, err = url.PathUnescape(v[2]); err != nil { + return nil, fmt.Errorf("Invalid namespace: %s", err) + } + if workloadIdentity, err = url.PathUnescape(v[3]); err != nil { + return nil, fmt.Errorf("Invalid workload identity: %s", err) + } + } + + return &SpiffeIDWorkloadIdentity{ + TrustDomain: input.Host, + Partition: ap, + Namespace: ns, + WorkloadIdentity: workloadIdentity, + }, nil } else if v := spiffeIDAgentRegexp.FindStringSubmatch(path); v != nil { // Determine the values. We assume they're reasonable to save cycles, // but if the raw path is not empty that means that something is diff --git a/agent/connect/uri_service.go b/agent/connect/uri_service.go index 3be7cf4797a33..b35d1e0df437d 100644 --- a/agent/connect/uri_service.go +++ b/agent/connect/uri_service.go @@ -8,6 +8,7 @@ import ( "net/url" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/proto-public/pbresource" ) // SpiffeIDService is the structure to represent the SPIFFE ID for a service. @@ -52,3 +53,14 @@ func (id SpiffeIDService) uriPath() string { } return path } + +// SpiffeIDFromIdentityRef creates the SPIFFE ID from a workload identity. +// TODO (ishustava): make sure ref type is workload identity. 
+func SpiffeIDFromIdentityRef(trustDomain string, ref *pbresource.Reference) string { + return SpiffeIDWorkloadIdentity{ + TrustDomain: trustDomain, + Partition: ref.Tenancy.Partition, + Namespace: ref.Tenancy.Namespace, + WorkloadIdentity: ref.Name, + }.URI().String() +} diff --git a/agent/connect/uri_signing.go b/agent/connect/uri_signing.go index 9f2807d2ba68d..1913ae6bdfdfe 100644 --- a/agent/connect/uri_signing.go +++ b/agent/connect/uri_signing.go @@ -51,6 +51,12 @@ func (id SpiffeIDSigning) CanSign(cu CertURI) bool { // worry about Unicode domains if we start allowing customisation beyond the // built-in cluster ids. return strings.ToLower(other.Host) == id.Host() + case *SpiffeIDWorkloadIdentity: + // The trust domain component of the workload identity SPIFFE ID must be an exact match for now under + // ascii case folding (since hostnames are case-insensitive). Later we might + // worry about Unicode domains if we start allowing customisation beyond the + // built-in cluster ids. + return strings.ToLower(other.TrustDomain) == id.Host() case *SpiffeIDMeshGateway: // The host component of the mesh gateway SPIFFE ID must be an exact match for now under // ascii case folding (since hostnames are case-insensitive). 
Later we might diff --git a/agent/connect/uri_signing_test.go b/agent/connect/uri_signing_test.go index edd3d468931ba..737ca460542b7 100644 --- a/agent/connect/uri_signing_test.go +++ b/agent/connect/uri_signing_test.go @@ -98,6 +98,30 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) { input: &SpiffeIDService{Host: TestClusterID + ".fake", Namespace: "default", Datacenter: "dc1", Service: "web"}, want: false, }, + { + name: "workload - good", + id: testSigning, + input: &SpiffeIDWorkloadIdentity{TrustDomain: TestClusterID + ".consul", Namespace: "default", WorkloadIdentity: "web"}, + want: true, + }, + { + name: "workload - good mixed case", + id: testSigning, + input: &SpiffeIDWorkloadIdentity{TrustDomain: strings.ToUpper(TestClusterID) + ".CONsuL", Namespace: "defAUlt", WorkloadIdentity: "WEB"}, + want: true, + }, + { + name: "workload - different cluster", + id: testSigning, + input: &SpiffeIDWorkloadIdentity{TrustDomain: "55555555-4444-3333-2222-111111111111.consul", Namespace: "default", WorkloadIdentity: "web"}, + want: false, + }, + { + name: "workload - different TLD", + id: testSigning, + input: &SpiffeIDWorkloadIdentity{TrustDomain: TestClusterID + ".fake", Namespace: "default", WorkloadIdentity: "web"}, + want: false, + }, { name: "mesh gateway - good", id: testSigning, diff --git a/agent/connect/uri_test.go b/agent/connect/uri_test.go index fcbcf42ab3a28..52116845975bb 100644 --- a/agent/connect/uri_test.go +++ b/agent/connect/uri_test.go @@ -51,6 +51,61 @@ func TestParseCertURIFromString(t *testing.T) { }, ParseError: "", }, + { + Name: "basic workload ID", + URI: "spiffe://1234.consul/ap/default/ns/default/identity/web", + Struct: &SpiffeIDWorkloadIdentity{ + TrustDomain: "1234.consul", + Partition: defaultEntMeta.PartitionOrDefault(), + Namespace: "default", + WorkloadIdentity: "web", + }, + ParseError: "", + }, + { + Name: "basic workload ID with nondefault partition", + URI: "spiffe://1234.consul/ap/bizdev/ns/default/identity/web", + Struct: 
&SpiffeIDWorkloadIdentity{ + TrustDomain: "1234.consul", + Partition: "bizdev", + Namespace: "default", + WorkloadIdentity: "web", + }, + ParseError: "", + }, + { + Name: "workload ID error - missing identity", + URI: "spiffe://1234.consul/ns/default", + Struct: &SpiffeIDWorkloadIdentity{ + TrustDomain: "1234.consul", + Partition: defaultEntMeta.PartitionOrDefault(), + Namespace: "default", + WorkloadIdentity: "web", + }, + ParseError: "SPIFFE ID is not in the expected format", + }, + { + Name: "workload ID error - missing partition", + URI: "spiffe://1234.consul/ns/default/identity/web", + Struct: &SpiffeIDWorkloadIdentity{ + TrustDomain: "1234.consul", + Partition: defaultEntMeta.PartitionOrDefault(), + Namespace: "default", + WorkloadIdentity: "web", + }, + ParseError: "SPIFFE ID is not in the expected format", + }, + { + Name: "workload ID error - missing namespace", + URI: "spiffe://1234.consul/ap/default/identity/web", + Struct: &SpiffeIDWorkloadIdentity{ + TrustDomain: "1234.consul", + Partition: defaultEntMeta.PartitionOrDefault(), + Namespace: "default", + WorkloadIdentity: "web", + }, + ParseError: "SPIFFE ID is not in the expected format", + }, { Name: "basic agent ID", URI: "spiffe://1234.consul/agent/client/dc/dc1/id/uuid", diff --git a/agent/connect/uri_workload_identity.go b/agent/connect/uri_workload_identity.go new file mode 100644 index 0000000000000..83e022bde69e3 --- /dev/null +++ b/agent/connect/uri_workload_identity.go @@ -0,0 +1,40 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package connect + +import ( + "fmt" + "net/url" +) + +// SpiffeIDWorkloadIdentity is the structure to represent the SPIFFE ID for a workload. +type SpiffeIDWorkloadIdentity struct { + TrustDomain string + Partition string + Namespace string + WorkloadIdentity string +} + +// URI returns the *url.URL for this SPIFFE ID. 
+func (id SpiffeIDWorkloadIdentity) URI() *url.URL { + var result url.URL + result.Scheme = "spiffe" + result.Host = id.TrustDomain + result.Path = id.uriPath() + return &result +} + +func (id SpiffeIDWorkloadIdentity) uriPath() string { + // Although CE has no support for partitions, it still needs to be able to + // handle exportedPartition from peered Consul Enterprise clusters in order + // to generate the correct SpiffeID. + // We intentionally avoid using pbpartition.DefaultName here to be CE friendly. + path := fmt.Sprintf("/ap/%s/ns/%s/identity/%s", + id.Partition, + id.Namespace, + id.WorkloadIdentity, + ) + + return path +} diff --git a/agent/connect/uri_workload_identity_ce.go b/agent/connect/uri_workload_identity_ce.go new file mode 100644 index 0000000000000..03505616341ee --- /dev/null +++ b/agent/connect/uri_workload_identity_ce.go @@ -0,0 +1,18 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !consulent + +package connect + +import ( + "github.com/hashicorp/consul/acl" +) + +// TODO: this will need to somehow be updated to set namespace here when we include namespaces in CE + +// GetEnterpriseMeta will synthesize an EnterpriseMeta struct from the SpiffeIDWorkloadIdentity. +// in CE this just returns an empty (but never nil) struct pointer +func (id SpiffeIDWorkloadIdentity) GetEnterpriseMeta() *acl.EnterpriseMeta { + return &acl.EnterpriseMeta{} +} diff --git a/agent/connect/uri_workload_identity_test.go b/agent/connect/uri_workload_identity_test.go new file mode 100644 index 0000000000000..94beb80f584bb --- /dev/null +++ b/agent/connect/uri_workload_identity_test.go @@ -0,0 +1,31 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package connect + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSpiffeIDWorkloadURI(t *testing.T) { + t.Run("spiffe id workload uri default tenancy", func(t *testing.T) { + wl := &SpiffeIDWorkloadIdentity{ + TrustDomain: "1234.consul", + WorkloadIdentity: "web", + Partition: "default", + Namespace: "default", + } + require.Equal(t, "spiffe://1234.consul/ap/default/ns/default/identity/web", wl.URI().String()) + }) + t.Run("spiffe id workload uri non-default tenancy", func(t *testing.T) { + wl := &SpiffeIDWorkloadIdentity{ + TrustDomain: "1234.consul", + WorkloadIdentity: "web", + Partition: "part1", + Namespace: "dev", + } + require.Equal(t, "spiffe://1234.consul/ap/part1/ns/dev/identity/web", wl.URI().String()) + }) +} diff --git a/agent/consul/config_endpoint.go b/agent/consul/config_endpoint.go index 96906dac6824a..a78859c35058e 100644 --- a/agent/consul/config_endpoint.go +++ b/agent/consul/config_endpoint.go @@ -10,11 +10,10 @@ import ( metrics "github.com/armon/go-metrics" "github.com/armon/go-metrics/prometheus" - hashstructure_v2 "github.com/mitchellh/hashstructure/v2" - "github.com/hashicorp/go-bexpr" "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" + hashstructure_v2 "github.com/mitchellh/hashstructure/v2" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/configentry" @@ -86,7 +85,6 @@ func (c *ConfigEntry) Apply(args *structs.ConfigEntryRequest, reply *bool) error } // Normalize and validate the incoming config entry as if it came from a user. 
- // Ensure Normalize is called before Validate for accurate validation if err := args.Entry.Normalize(); err != nil { return err } diff --git a/agent/consul/config_replication_test.go b/agent/consul/config_replication_test.go index 3117e046a4635..e2c4fbee8d8a1 100644 --- a/agent/consul/config_replication_test.go +++ b/agent/consul/config_replication_test.go @@ -6,11 +6,11 @@ package consul import ( "context" "fmt" + "github.com/oklog/ulid/v2" + "github.com/stretchr/testify/assert" "os" "testing" - "github.com/oklog/ulid/v2" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent/structs" @@ -129,7 +129,7 @@ func TestReplication_ConfigEntries(t *testing.T) { Entry: &structs.ServiceConfigEntry{ Kind: structs.ServiceDefaults, Name: fmt.Sprintf("svc-%d", i), - Protocol: "tcp", + Protocol: "udp", }, } diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 8b0db34b2356c..f8340d2b32c5e 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -5,6 +5,7 @@ package consul import ( "context" + "errors" "fmt" "net" "strconv" @@ -15,7 +16,10 @@ import ( "github.com/armon/go-metrics" "github.com/armon/go-metrics/prometheus" + "github.com/google/go-cmp/cmp" + "github.com/oklog/ulid/v2" "golang.org/x/time/rate" + "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" @@ -28,8 +32,13 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs/aclfilter" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/logging" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" ) var LeaderSummaries = []prometheus.SummaryDefinition{ @@ 
-340,6 +349,18 @@ func (s *Server) establishLeadership(ctx context.Context) error { s.startLogVerification(ctx) } + if s.useV2Tenancy { + if err := s.initTenancy(ctx, s.storageBackend); err != nil { + return err + } + } + + if s.useV2Resources { + if err := s.initConsulService(ctx, pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan)); err != nil { + return err + } + } + if s.config.Reporting.License.Enabled && s.reportingManager != nil { s.reportingManager.StartReportingAgent() } @@ -750,6 +771,12 @@ func (s *Server) runACLReplicator( index, exit, err := replicateFunc(ctx, logger, lastRemoteIndex) if exit { + metrics.SetGauge([]string{"leader", "replication", metricName, "status"}, + 0, + ) + metrics.SetGauge([]string{"leader", "replication", metricName, "index"}, + 0, + ) return nil } @@ -1289,3 +1316,121 @@ func (s *serversIntentionsAsConfigEntriesInfo) update(srv *metadata.Server) bool // prevent continuing server evaluation return false } + +func (s *Server) initConsulService(ctx context.Context, client pbresource.ResourceServiceClient) error { + service := &pbcatalog.Service{ + Workloads: &pbcatalog.WorkloadSelector{ + Prefixes: []string{consulWorkloadPrefix}, + }, + Ports: []*pbcatalog.ServicePort{ + { + TargetPort: consulPortNameServer, + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + // No virtual port defined for now, as we assume this is generally for Service Discovery + }, + }, + } + + serviceData, err := anypb.New(service) + if err != nil { + return fmt.Errorf("could not convert Service to `any` message: %w", err) + } + + // create a default namespace in default partition + serviceID := &pbresource.ID{ + Type: pbcatalog.ServiceType, + Name: structs.ConsulServiceName, + Tenancy: resource.DefaultNamespacedTenancy(), + } + + serviceResource := &pbresource.Resource{ + Id: serviceID, + Data: serviceData, + } + + res, err := client.Read(ctx, &pbresource.ReadRequest{Id: serviceID}) + if err != nil && !grpcNotFoundErr(err) { + return fmt.Errorf("failed to 
read the %s Service: %w", structs.ConsulServiceName, err) + } + + if err == nil { + existingService := res.GetResource() + s.logger.Debug("existingService consul Service found") + + // If the Service is identical, we're done. + if cmp.Equal(serviceResource, existingService, resourceCmpOptions...) { + s.logger.Debug("no updates to perform on consul Service") + return nil + } + + // If the existing Service is different, add the Version to the patch for CAS write. + serviceResource.Id = existingService.Id + serviceResource.Version = existingService.Version + } + + _, err = client.Write(ctx, &pbresource.WriteRequest{Resource: serviceResource}) + if err != nil { + return fmt.Errorf("failed to create the %s service: %w", structs.ConsulServiceName, err) + } + + s.logger.Info("Created consul Service in catalog") + return nil +} + +func (s *Server) initTenancy(ctx context.Context, b storage.Backend) error { + // we write these defaults directly to the storage backend + // without going through the resource service since tenancy + // validation hooks block writes to the default namespace + // and partition. 
+ if err := s.createDefaultPartition(ctx, b); err != nil { + return err + } + + if err := s.createDefaultNamespace(ctx, b); err != nil { + return err + } + return nil +} + +func (s *Server) createDefaultNamespace(ctx context.Context, b storage.Backend) error { + readID := &pbresource.ID{ + Type: pbtenancy.NamespaceType, + Name: resource.DefaultNamespaceName, + Tenancy: resource.DefaultPartitionedTenancy(), + } + + read, err := b.Read(ctx, storage.StrongConsistency, readID) + + if err != nil && !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to read the %q namespace: %v", resource.DefaultNamespaceName, err) + } + if read == nil && errors.Is(err, storage.ErrNotFound) { + nsData, err := anypb.New(&pbtenancy.Namespace{Description: "default namespace in default partition"}) + if err != nil { + return err + } + + // create a default namespace in default partition + nsID := &pbresource.ID{ + Type: pbtenancy.NamespaceType, + Name: resource.DefaultNamespaceName, + Tenancy: resource.DefaultPartitionedTenancy(), + Uid: ulid.Make().String(), + } + + _, err = b.WriteCAS(ctx, &pbresource.Resource{ + Id: nsID, + Generation: ulid.Make().String(), + Data: nsData, + Metadata: map[string]string{ + "generated_at": time.Now().Format(time.RFC3339), + }, + }) + + if err != nil { + return fmt.Errorf("failed to create the %q namespace: %v", resource.DefaultNamespaceName, err) + } + } + s.logger.Info("Created", "namespace", resource.DefaultNamespaceName) + return nil +} diff --git a/agent/consul/leader_ce.go b/agent/consul/leader_ce.go new file mode 100644 index 0000000000000..2d67b7bdedd8f --- /dev/null +++ b/agent/consul/leader_ce.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !consulent + +package consul + +import ( + "context" + + "github.com/hashicorp/consul/internal/storage" +) + +func (s *Server) createDefaultPartition(ctx context.Context, b storage.Backend) error { + // no-op + return nil +} diff --git a/agent/consul/leader_ce_test.go b/agent/consul/leader_ce_test.go index 367a9fbcae83b..79e3cbc61a8f6 100644 --- a/agent/consul/leader_ce_test.go +++ b/agent/consul/leader_ce_test.go @@ -6,7 +6,17 @@ package consul import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/internal/gossip/libserf" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/storage" + "github.com/hashicorp/consul/proto-public/pbresource" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" + "github.com/hashicorp/consul/testrpc" ) func updateSerfTags(s *Server, key, value string) { @@ -16,3 +26,41 @@ func updateSerfTags(s *Server, key, value string) { libserf.UpdateTag(s.serfWAN, key, value) } } + +func TestServer_InitTenancy(t *testing.T) { + t.Parallel() + + _, conf := testServerConfig(t) + deps := newDefaultDeps(t, conf) + deps.Experiments = []string{"v2tenancy"} + deps.Registry = NewTypeRegistry() + + s, err := newServerWithDeps(t, conf, deps) + require.NoError(t, err) + + // first initTenancy call happens here + waitForLeaderEstablishment(t, s) + testrpc.WaitForLeader(t, s.RPC, "dc1") + + nsID := &pbresource.ID{ + Type: pbtenancy.NamespaceType, + Tenancy: resource.DefaultPartitionedTenancy(), + Name: resource.DefaultNamespaceName, + } + + ns, err := s.storageBackend.Read(context.Background(), storage.StrongConsistency, nsID) + require.NoError(t, err) + require.Equal(t, resource.DefaultNamespaceName, ns.Id.Name) + + // explicitly call initiTenancy to verify we do not re-create namespace + err = s.initTenancy(context.Background(), s.storageBackend) + require.NoError(t, err) + + // read again + actual, 
err := s.storageBackend.Read(context.Background(), storage.StrongConsistency, nsID) + require.NoError(t, err) + + require.Equal(t, ns.Id.Uid, actual.Id.Uid) + require.Equal(t, ns.Generation, actual.Generation) + require.Equal(t, ns.Version, actual.Version) +} diff --git a/agent/consul/leader_connect_ca.go b/agent/consul/leader_connect_ca.go index ee6562912fe57..92cdf40a6abd2 100644 --- a/agent/consul/leader_connect_ca.go +++ b/agent/consul/leader_connect_ca.go @@ -14,10 +14,9 @@ import ( "sync" "time" - "golang.org/x/time/rate" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" + "golang.org/x/time/rate" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" @@ -1456,6 +1455,11 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different datacenter: %s, "+ "we are %s", v.Datacenter, dc) } + case *connect.SpiffeIDWorkloadIdentity: + v.GetEnterpriseMeta().FillAuthzContext(&authzContext) + if err := allow.IdentityWriteAllowed(v.WorkloadIdentity, &authzContext); err != nil { + return nil, err + } case *connect.SpiffeIDAgent: v.GetEnterpriseMeta().FillAuthzContext(&authzContext) if err := allow.NodeWriteAllowed(v.Agent, &authzContext); err != nil { @@ -1516,6 +1520,7 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne agentID, isAgent := spiffeID.(*connect.SpiffeIDAgent) serverID, isServer := spiffeID.(*connect.SpiffeIDServer) mgwID, isMeshGateway := spiffeID.(*connect.SpiffeIDMeshGateway) + wID, isWorkloadIdentity := spiffeID.(*connect.SpiffeIDWorkloadIdentity) var entMeta acl.EnterpriseMeta switch { @@ -1525,6 +1530,12 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne "we are %s", serviceID.Host, signingID.Host()) } entMeta.Merge(serviceID.GetEnterpriseMeta()) + case isWorkloadIdentity: + if !signingID.CanSign(spiffeID) { + return nil, connect.InvalidCSRError("SPIFFE ID in 
CSR from a different trust domain: %s, "+ + "we are %s", wID.TrustDomain, signingID.Host()) + } + entMeta.Merge(wID.GetEnterpriseMeta()) case isMeshGateway: if !signingID.CanSign(spiffeID) { return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different trust domain: %s, "+ @@ -1647,6 +1658,9 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne case isService: reply.Service = serviceID.Service reply.ServiceURI = cert.URIs[0].String() + case isWorkloadIdentity: + reply.WorkloadIdentity = wID.WorkloadIdentity + reply.WorkloadIdentityURI = cert.URIs[0].String() case isMeshGateway: reply.Kind = structs.ServiceKindMeshGateway reply.KindURI = cert.URIs[0].String() diff --git a/agent/consul/leader_connect_ca_test.go b/agent/consul/leader_connect_ca_test.go index 4560e97380c4c..e372c010a7064 100644 --- a/agent/consul/leader_connect_ca_test.go +++ b/agent/consul/leader_connect_ca_test.go @@ -19,13 +19,12 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/hashicorp/consul-net-rpc/net/rpc" vaultapi "github.com/hashicorp/vault/api" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" @@ -567,7 +566,7 @@ func TestCAManager_Initialize_Logging(t *testing.T) { deps := newDefaultDeps(t, conf1) deps.Logger = logger - s1, err := NewServer(conf1, deps, grpc.NewServer(), nil, logger) + s1, err := NewServer(conf1, deps, grpc.NewServer(), nil, logger, nil) require.NoError(t, err) defer s1.Shutdown() testrpc.WaitForLeader(t, s1.RPC, "dc1") @@ -1318,6 +1317,12 @@ func TestCAManager_AuthorizeAndSignCertificate(t *testing.T) { Host: "test-host", Partition: "test-partition", }.URI() + identityURL := connect.SpiffeIDWorkloadIdentity{ + TrustDomain: 
"test-trust-domain", + Partition: "test-partition", + Namespace: "test-namespace", + WorkloadIdentity: "test-workload-identity", + }.URI() tests := []struct { name string @@ -1413,6 +1418,15 @@ func TestCAManager_AuthorizeAndSignCertificate(t *testing.T) { } }, }, + { + name: "err_identity_write_not_allowed", + expectErr: "Permission denied", + getCSR: func() *x509.CertificateRequest { + return &x509.CertificateRequest{ + URIs: []*url.URL{identityURL}, + } + }, + }, } for _, tc := range tests { diff --git a/agent/consul/leader_registrator_v2.go b/agent/consul/leader_registrator_v2.go new file mode 100644 index 0000000000000..671fcc85d1287 --- /dev/null +++ b/agent/consul/leader_registrator_v2.go @@ -0,0 +1,411 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package consul + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/serf/serf" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/metadata" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/internal/resource" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/types" +) + +const ( + consulWorkloadPrefix = "consul-server-" + consulPortNameServer = "server" +) + +var _ ConsulRegistrator = (*V2ConsulRegistrator)(nil) + +var resourceCmpOptions = []cmp.Option{ + protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"), + protocmp.IgnoreFields(&pbresource.ID{}, "uid"), + protocmp.Transform(), + // Stringify any type passed to the sorter so that we can reliably compare most values. 
+ cmpopts.SortSlices(func(a, b any) bool { return fmt.Sprintf("%v", a) < fmt.Sprintf("%v", b) }), +} + +type V2ConsulRegistrator struct { + Logger hclog.Logger + NodeName string + EntMeta *acl.EnterpriseMeta + + Client pbresource.ResourceServiceClient +} + +// HandleAliveMember is used to ensure the server is registered as a Workload +// with a passing health check. +func (r V2ConsulRegistrator) HandleAliveMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, joinServer func(m serf.Member, parts *metadata.Server) error) error { + valid, parts := metadata.IsConsulServer(member) + if !valid { + return nil + } + + if nodeEntMeta == nil { + nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() + } + + // Attempt to join the consul server, regardless of the existing catalog state + if err := joinServer(member, parts); err != nil { + return err + } + + r.Logger.Info("member joined, creating catalog entries", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + + workloadResource, err := r.createWorkloadFromMember(member, parts, nodeEntMeta) + if err != nil { + return err + } + + // Check if the Workload already exists and if it's the same + res, err := r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: workloadResource.Id}) + if err != nil && !grpcNotFoundErr(err) { + return fmt.Errorf("error checking for existing Workload %s: %w", workloadResource.Id.Name, err) + } + + if err == nil { + existingWorkload := res.GetResource() + + r.Logger.Debug("existing Workload matching the member found", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + + // If the Workload is identical, move to updating the health status + if cmp.Equal(workloadResource, existingWorkload, resourceCmpOptions...) 
{ + r.Logger.Debug("no updates to perform on member Workload", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + goto HEALTHSTATUS + } + + // If the existing Workload different, add the existing Version into the patch for CAS write + workloadResource.Id = existingWorkload.Id + workloadResource.Version = existingWorkload.Version + } + + if _, err := r.Client.Write(context.TODO(), &pbresource.WriteRequest{Resource: workloadResource}); err != nil { + return fmt.Errorf("failed to write Workload %s: %w", workloadResource.Id.Name, err) + } + + r.Logger.Info("updated consul Workload in catalog", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + +HEALTHSTATUS: + hsResource, err := r.createHealthStatusFromMember(member, workloadResource.Id, true, nodeEntMeta) + if err != nil { + return err + } + + // Check if the HealthStatus already exists and if it's the same + res, err = r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: hsResource.Id}) + if err != nil && !grpcNotFoundErr(err) { + return fmt.Errorf("error checking for existing HealthStatus %s: %w", hsResource.Id.Name, err) + } + + if err == nil { + existingHS := res.GetResource() + + r.Logger.Debug("existing HealthStatus matching the member found", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + + // If the HealthStatus is identical, we're done. + if cmp.Equal(hsResource, existingHS, resourceCmpOptions...) { + r.Logger.Debug("no updates to perform on member HealthStatus", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + return nil + } + + // If the existing HealthStatus is different, add the Version to the patch for CAS write. 
+ hsResource.Id = existingHS.Id + hsResource.Version = existingHS.Version + } + + if _, err := r.Client.Write(context.TODO(), &pbresource.WriteRequest{Resource: hsResource}); err != nil { + return fmt.Errorf("failed to write HealthStatus %s: %w", hsResource.Id.Name, err) + } + r.Logger.Info("updated consul HealthStatus in catalog", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + return nil +} + +func (r V2ConsulRegistrator) createWorkloadFromMember(member serf.Member, parts *metadata.Server, nodeEntMeta *acl.EnterpriseMeta) (*pbresource.Resource, error) { + workloadMeta := map[string]string{ + "read_replica": strconv.FormatBool(member.Tags["read_replica"] == "1"), + "raft_version": strconv.Itoa(parts.RaftVersion), + "serf_protocol_current": strconv.FormatUint(uint64(member.ProtocolCur), 10), + "serf_protocol_min": strconv.FormatUint(uint64(member.ProtocolMin), 10), + "serf_protocol_max": strconv.FormatUint(uint64(member.ProtocolMax), 10), + "version": parts.Build.String(), + } + + if parts.ExternalGRPCPort > 0 { + workloadMeta["grpc_port"] = strconv.Itoa(parts.ExternalGRPCPort) + } + if parts.ExternalGRPCTLSPort > 0 { + workloadMeta["grpc_tls_port"] = strconv.Itoa(parts.ExternalGRPCTLSPort) + } + + if parts.Port < 0 || parts.Port > 65535 { + return nil, fmt.Errorf("invalid port: %d", parts.Port) + } + + workload := &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: member.Addr.String(), Ports: []string{consulPortNameServer}}, + }, + // Don't include identity since Consul is not routable through the mesh. + // Don't include locality because these values are not passed along through serf, and they are probably + // different from the leader's values. 
+ Ports: map[string]*pbcatalog.WorkloadPort{ + consulPortNameServer: { + Port: uint32(parts.Port), + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + }, + // TODO: add other agent ports + }, + } + + workloadData, err := anypb.New(workload) + if err != nil { + return nil, fmt.Errorf("could not convert Workload to 'any' type: %w", err) + } + + workloadId := &pbresource.ID{ + Name: fmt.Sprintf("%s%s", consulWorkloadPrefix, types.NodeID(member.Tags["id"])), + Type: pbcatalog.WorkloadType, + Tenancy: resource.DefaultNamespacedTenancy(), + } + workloadId.Tenancy.Partition = nodeEntMeta.PartitionOrDefault() + + return &pbresource.Resource{ + Id: workloadId, + Data: workloadData, + Metadata: workloadMeta, + }, nil +} + +func (r V2ConsulRegistrator) createHealthStatusFromMember(member serf.Member, workloadId *pbresource.ID, passing bool, nodeEntMeta *acl.EnterpriseMeta) (*pbresource.Resource, error) { + hs := &pbcatalog.HealthStatus{ + Type: string(structs.SerfCheckID), + Description: structs.SerfCheckName, + } + + if passing { + hs.Status = pbcatalog.Health_HEALTH_PASSING + hs.Output = structs.SerfCheckAliveOutput + } else { + hs.Status = pbcatalog.Health_HEALTH_CRITICAL + hs.Output = structs.SerfCheckFailedOutput + } + + hsData, err := anypb.New(hs) + if err != nil { + return nil, fmt.Errorf("could not convert HealthStatus to 'any' type: %w", err) + } + + hsId := &pbresource.ID{ + Name: fmt.Sprintf("%s%s", consulWorkloadPrefix, types.NodeID(member.Tags["id"])), + Type: pbcatalog.HealthStatusType, + Tenancy: resource.DefaultNamespacedTenancy(), + } + hsId.Tenancy.Partition = nodeEntMeta.PartitionOrDefault() + + return &pbresource.Resource{ + Id: hsId, + Data: hsData, + Owner: workloadId, + }, nil +} + +// HandleFailedMember is used to mark the workload's associated HealthStatus. 
+func (r V2ConsulRegistrator) HandleFailedMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error { + if valid, _ := metadata.IsConsulServer(member); !valid { + return nil + } + + if nodeEntMeta == nil { + nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition() + } + + r.Logger.Info("member failed", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + + // Validate that the associated workload exists + workloadId := &pbresource.ID{ + Name: fmt.Sprintf("%s%s", consulWorkloadPrefix, types.NodeID(member.Tags["id"])), + Type: pbcatalog.WorkloadType, + Tenancy: resource.DefaultNamespacedTenancy(), + } + workloadId.Tenancy.Partition = nodeEntMeta.PartitionOrDefault() + + res, err := r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: workloadId}) + if err != nil && !grpcNotFoundErr(err) { + return fmt.Errorf("error checking for existing Workload %s: %w", workloadId.Name, err) + } + if grpcNotFoundErr(err) { + r.Logger.Info("ignoring failed event for member because it does not exist in the catalog", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + return nil + } + // Overwrite the workload ID with the one that has UID populated. + existingWorkload := res.GetResource() + + hsResource, err := r.createHealthStatusFromMember(member, existingWorkload.Id, false, nodeEntMeta) + if err != nil { + return err + } + + res, err = r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: hsResource.Id}) + if err != nil && !grpcNotFoundErr(err) { + return fmt.Errorf("error checking for existing HealthStatus %s: %w", hsResource.Id.Name, err) + } + + if err == nil { + existingHS := res.GetResource() + r.Logger.Debug("existing HealthStatus matching the member found", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + + // If the HealthStatus is identical, we're done. 
+ if cmp.Equal(hsResource, existingHS, resourceCmpOptions...) { + r.Logger.Debug("no updates to perform on member HealthStatus", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + return nil + } + + // If the existing HealthStatus is different, add the Version to the patch for CAS write. + hsResource.Id = existingHS.Id + hsResource.Version = existingHS.Version + } + + if _, err := r.Client.Write(context.TODO(), &pbresource.WriteRequest{Resource: hsResource}); err != nil { + return fmt.Errorf("failed to write HealthStatus %s: %w", hsResource.Id.Name, err) + } + r.Logger.Info("updated consul HealthStatus in catalog", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + return nil +} + +// HandleLeftMember is used to handle members that gracefully +// left. They are removed if necessary. +func (r V2ConsulRegistrator) HandleLeftMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error { + return r.handleDeregisterMember("left", member, nodeEntMeta, removeServerFunc) +} + +// HandleReapMember is used to handle members that have been +// reaped after a prolonged failure. They are removed from the catalog. 
+func (r V2ConsulRegistrator) HandleReapMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error {
+	return r.handleDeregisterMember("reaped", member, nodeEntMeta, removeServerFunc)
+}
+
+// handleDeregisterMember is used to remove a member for a given reason
+func (r V2ConsulRegistrator) handleDeregisterMember(reason string, member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error {
+	if valid, _ := metadata.IsConsulServer(member); !valid {
+		return nil
+	}
+
+	if nodeEntMeta == nil {
+		nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition()
+	}
+
+	r.Logger.Info("removing member",
+		"member", member.Name,
+		"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
+		"reason", reason,
+	)
+
+	if err := removeServerFunc(member); err != nil {
+		return err
+	}
+
+	// Do not remove ourselves. This can only happen if the current leader
+	// is leaving. Instead, we should allow a follower to take-over and
+	// remove us later.
+ if strings.EqualFold(member.Name, r.NodeName) && + strings.EqualFold(nodeEntMeta.PartitionOrDefault(), r.EntMeta.PartitionOrDefault()) { + r.Logger.Warn("removing self should be done by follower", + "name", r.NodeName, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + "reason", reason, + ) + return nil + } + + // Check if the workload exists + workloadID := &pbresource.ID{ + Name: fmt.Sprintf("%s%s", consulWorkloadPrefix, types.NodeID(member.Tags["id"])), + Type: pbcatalog.WorkloadType, + Tenancy: resource.DefaultNamespacedTenancy(), + } + workloadID.Tenancy.Partition = nodeEntMeta.PartitionOrDefault() + + res, err := r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: workloadID}) + if err != nil && !grpcNotFoundErr(err) { + return fmt.Errorf("error checking for existing Workload %s: %w", workloadID.Name, err) + } + if grpcNotFoundErr(err) { + r.Logger.Info("ignoring reap event for member because it does not exist in the catalog", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + return nil + } + existingWorkload := res.GetResource() + + // The HealthStatus should be reaped automatically + if _, err := r.Client.Delete(context.TODO(), &pbresource.DeleteRequest{Id: existingWorkload.Id}); err != nil { + return fmt.Errorf("failed to delete Workload %s: %w", existingWorkload.Id.Name, err) + } + r.Logger.Info("deleted consul Workload", + "member", member.Name, + "partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(), + ) + return err +} + +func grpcNotFoundErr(err error) bool { + if err == nil { + return false + } + s, ok := status.FromError(err) + return ok && s.Code() == codes.NotFound +} diff --git a/agent/consul/leader_registrator_v2_test.go b/agent/consul/leader_registrator_v2_test.go new file mode 100644 index 0000000000000..c2729c47fff8a --- /dev/null +++ b/agent/consul/leader_registrator_v2_test.go @@ -0,0 +1,583 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package consul + +import ( + "fmt" + "net" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/serf/serf" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/metadata" + "github.com/hashicorp/consul/agent/structs" + mockpbresource "github.com/hashicorp/consul/grpcmocks/proto-public/pbresource" + "github.com/hashicorp/consul/internal/resource" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +var ( + fakeWrappedErr = fmt.Errorf("fake test error") +) + +type testCase struct { + name string + member serf.Member + nodeNameOverride string // This is used in the HandleLeftMember test to avoid deregistering ourself + + existingWorkload *pbresource.Resource + workloadReadErr bool + workloadWriteErr bool + workloadDeleteErr bool + + existingHealthStatus *pbresource.Resource + healthstatusReadErr bool + healthstatusWriteErr bool + + mutatedWorkload *pbresource.Resource // leaving one of these out means the mock expects not to have a write/delete called + mutatedHealthStatus *pbresource.Resource + expErr string +} + +func Test_HandleAliveMember(t *testing.T) { + t.Parallel() + + run := func(t *testing.T, tt testCase) { + client := mockpbresource.NewResourceServiceClient(t) + mockClient := client.EXPECT() + + // Build mock expectations based on the order of HandleAliveMember resource calls + setupReadExpectation(t, mockClient, getTestWorkloadId(), tt.existingWorkload, tt.workloadReadErr) + setupWriteExpectation(t, mockClient, tt.mutatedWorkload, tt.workloadWriteErr) + if !tt.workloadReadErr && !tt.workloadWriteErr { + // We expect to bail before this 
read if there is an error earlier in the function + setupReadExpectation(t, mockClient, getTestHealthstatusId(), tt.existingHealthStatus, tt.healthstatusReadErr) + } + setupWriteExpectation(t, mockClient, tt.mutatedHealthStatus, tt.healthstatusWriteErr) + + registrator := V2ConsulRegistrator{ + Logger: hclog.New(&hclog.LoggerOptions{}), + NodeName: "test-server-1", + Client: client, + } + + // Mock join function + var joinMockCalled bool + joinMock := func(_ serf.Member, _ *metadata.Server) error { + joinMockCalled = true + return nil + } + + err := registrator.HandleAliveMember(tt.member, acl.DefaultEnterpriseMeta(), joinMock) + if tt.expErr != "" { + require.Contains(t, err.Error(), tt.expErr) + } else { + require.NoError(t, err) + } + require.True(t, joinMockCalled, "the mock join function was not called") + } + + tests := []testCase{ + { + name: "New alive member", + member: getTestSerfMember(serf.StatusAlive), + mutatedWorkload: getTestWorkload(t), + mutatedHealthStatus: getTestHealthStatus(t, true), + }, + { + name: "No updates needed", + member: getTestSerfMember(serf.StatusAlive), + existingWorkload: getTestWorkload(t), + existingHealthStatus: getTestHealthStatus(t, true), + }, + { + name: "Existing Workload and HS need to be updated", + member: getTestSerfMember(serf.StatusAlive), + existingWorkload: getTestWorkloadWithPort(t, 8301), + existingHealthStatus: getTestHealthStatus(t, false), + mutatedWorkload: getTestWorkload(t), + mutatedHealthStatus: getTestHealthStatus(t, true), + }, + { + name: "Only the HS needs to be updated", + member: getTestSerfMember(serf.StatusAlive), + existingWorkload: getTestWorkload(t), + existingHealthStatus: getTestHealthStatus(t, false), + mutatedHealthStatus: getTestHealthStatus(t, true), + }, + { + name: "Error reading Workload", + member: getTestSerfMember(serf.StatusAlive), + workloadReadErr: true, + expErr: "error checking for existing Workload", + }, + { + name: "Error writing Workload", + member: 
getTestSerfMember(serf.StatusAlive), + workloadWriteErr: true, + mutatedWorkload: getTestWorkload(t), + expErr: "failed to write Workload", + }, + { + name: "Error reading HealthStatus", + member: getTestSerfMember(serf.StatusAlive), + healthstatusReadErr: true, + mutatedWorkload: getTestWorkload(t), + expErr: "error checking for existing HealthStatus", + }, + { + name: "Error writing HealthStatus", + member: getTestSerfMember(serf.StatusAlive), + healthstatusWriteErr: true, + mutatedWorkload: getTestWorkload(t), + mutatedHealthStatus: getTestHealthStatus(t, true), + expErr: "failed to write HealthStatus", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + run(t, tt) + }) + } +} + +func Test_HandleFailedMember(t *testing.T) { + t.Parallel() + + run := func(t *testing.T, tt testCase) { + client := mockpbresource.NewResourceServiceClient(t) + mockClient := client.EXPECT() + + // Build mock expectations based on the order of HandleFailed resource calls + setupReadExpectation(t, mockClient, getTestWorkloadId(), tt.existingWorkload, tt.workloadReadErr) + if !tt.workloadReadErr && tt.existingWorkload != nil { + // We expect to bail before this read if there is an error earlier in the function or there is no workload + setupReadExpectation(t, mockClient, getTestHealthstatusId(), tt.existingHealthStatus, tt.healthstatusReadErr) + } + setupWriteExpectation(t, mockClient, tt.mutatedHealthStatus, tt.healthstatusWriteErr) + + registrator := V2ConsulRegistrator{ + Logger: hclog.New(&hclog.LoggerOptions{}), + NodeName: "test-server-1", + Client: client, + } + + err := registrator.HandleFailedMember(tt.member, acl.DefaultEnterpriseMeta()) + if tt.expErr != "" { + require.Contains(t, err.Error(), tt.expErr) + } else { + require.NoError(t, err) + } + } + + tests := []testCase{ + { + name: "Update non-existent HealthStatus", + member: getTestSerfMember(serf.StatusFailed), + existingWorkload: getTestWorkload(t), + mutatedHealthStatus: 
getTestHealthStatus(t, false), + }, + { + name: "Underlying Workload does not exist", + member: getTestSerfMember(serf.StatusFailed), + }, + { + name: "Update an existing HealthStatus", + member: getTestSerfMember(serf.StatusFailed), + existingWorkload: getTestWorkload(t), + existingHealthStatus: getTestHealthStatus(t, true), + mutatedHealthStatus: getTestHealthStatus(t, false), + }, + { + name: "HealthStatus is already critical - no updates needed", + member: getTestSerfMember(serf.StatusFailed), + existingWorkload: getTestWorkload(t), + existingHealthStatus: getTestHealthStatus(t, false), + }, + { + name: "Error reading Workload", + member: getTestSerfMember(serf.StatusFailed), + workloadReadErr: true, + expErr: "error checking for existing Workload", + }, + { + name: "Error reading HealthStatus", + member: getTestSerfMember(serf.StatusFailed), + existingWorkload: getTestWorkload(t), + healthstatusReadErr: true, + expErr: "error checking for existing HealthStatus", + }, + { + name: "Error writing HealthStatus", + member: getTestSerfMember(serf.StatusFailed), + existingWorkload: getTestWorkload(t), + healthstatusWriteErr: true, + mutatedHealthStatus: getTestHealthStatus(t, false), + expErr: "failed to write HealthStatus", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + run(t, tt) + }) + } +} + +// Test_HandleLeftMember also tests HandleReapMembers, which are the same core logic with some different logs. 
+func Test_HandleLeftMember(t *testing.T) {
+	t.Parallel()
+
+	run := func(t *testing.T, tt testCase) {
+		client := mockpbresource.NewResourceServiceClient(t)
+		mockClient := client.EXPECT()
+
+		// Build mock expectations based on the order of HandleLeftMember resource calls
+		// We check for the override, which we use to skip self de-registration
+		if tt.nodeNameOverride == "" {
+			setupReadExpectation(t, mockClient, getTestWorkloadId(), tt.existingWorkload, tt.workloadReadErr)
+			if tt.existingWorkload != nil && !tt.workloadReadErr {
+				setupDeleteExpectation(t, mockClient, tt.mutatedWorkload, tt.workloadDeleteErr)
+			}
+		}
+
+		nodeName := "test-server-2" // This is not the same as the serf node so we don't deregister ourselves.
+		if tt.nodeNameOverride != "" {
+			nodeName = tt.nodeNameOverride
+		}
+
+		registrator := V2ConsulRegistrator{
+			Logger:   hclog.New(&hclog.LoggerOptions{}),
+			NodeName: nodeName, // We change this so that we don't deregister ourselves
+			Client:   client,
+		}
+
+		// Mock remove function
+		var removeMockCalled bool
+		removeMock := func(_ serf.Member) error {
+			removeMockCalled = true
+			return nil
+		}
+
+		err := registrator.HandleLeftMember(tt.member, acl.DefaultEnterpriseMeta(), removeMock)
+		if tt.expErr != "" {
+			require.Contains(t, err.Error(), tt.expErr)
+		} else {
+			require.NoError(t, err)
+		}
+		require.True(t, removeMockCalled, "the mock remove function was not called")
+	}
+
+	tests := []testCase{
+		{
+			name:             "Remove member",
+			member:           getTestSerfMember(serf.StatusAlive),
+			existingWorkload: getTestWorkload(t),
+			mutatedWorkload:  getTestWorkload(t),
+		},
+		{
+			name:             "Don't deregister ourself",
+			member:           getTestSerfMember(serf.StatusAlive),
+			nodeNameOverride: "test-server-1",
+		},
+		{
+			name:   "Don't do anything if the Workload is already gone",
+			member: getTestSerfMember(serf.StatusAlive),
+		},
+		{
+			name:             "Remove member regardless of Workload payload",
+			member:           getTestSerfMember(serf.StatusAlive),
+			existingWorkload: getTestWorkloadWithPort(t,
8301), + mutatedWorkload: getTestWorkload(t), + }, + { + name: "Error reading Workload", + member: getTestSerfMember(serf.StatusAlive), + workloadReadErr: true, + expErr: "error checking for existing Workload", + }, + { + name: "Error deleting Workload", + member: getTestSerfMember(serf.StatusAlive), + workloadDeleteErr: true, + existingWorkload: getTestWorkloadWithPort(t, 8301), + mutatedWorkload: getTestWorkload(t), + expErr: "failed to delete Workload", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + run(t, tt) + }) + } +} + +func setupReadExpectation( + t *testing.T, + mockClient *mockpbresource.ResourceServiceClient_Expecter, + expectedId *pbresource.ID, + existingResource *pbresource.Resource, + sendErr bool) { + + if sendErr { + mockClient.Read(mock.Anything, mock.Anything). + Return(nil, fakeWrappedErr). + Once(). + Run(func(args mock.Arguments) { + req := args.Get(1).(*pbresource.ReadRequest) + require.True(t, proto.Equal(expectedId, req.Id)) + }) + } else if existingResource != nil { + mockClient.Read(mock.Anything, mock.Anything). + Return(&pbresource.ReadResponse{ + Resource: existingResource, + }, nil). + Once(). + Run(func(args mock.Arguments) { + req := args.Get(1).(*pbresource.ReadRequest) + require.True(t, proto.Equal(expectedId, req.Id)) + }) + } else { + mockClient.Read(mock.Anything, mock.Anything). + Return(nil, status.Error(codes.NotFound, "not found")). + Once(). + Run(func(args mock.Arguments) { + req := args.Get(1).(*pbresource.ReadRequest) + require.True(t, proto.Equal(expectedId, req.Id)) + }) + } +} + +func setupWriteExpectation( + t *testing.T, + mockClient *mockpbresource.ResourceServiceClient_Expecter, + expectedResource *pbresource.Resource, + sendErr bool) { + + // If there is no expected resource, we take that to mean we don't expect any client writes. + if expectedResource == nil { + return + } + + if sendErr { + mockClient.Write(mock.Anything, mock.Anything). + Return(nil, fakeWrappedErr). 
+ Once(). + Run(func(args mock.Arguments) { + req := args.Get(1).(*pbresource.WriteRequest) + require.True(t, proto.Equal(expectedResource, req.Resource)) + }) + } else { + mockClient.Write(mock.Anything, mock.Anything). + Return(nil, nil). + Once(). + Run(func(args mock.Arguments) { + req := args.Get(1).(*pbresource.WriteRequest) + require.True(t, proto.Equal(expectedResource, req.Resource)) + }) + } +} + +func setupDeleteExpectation( + t *testing.T, + mockClient *mockpbresource.ResourceServiceClient_Expecter, + expectedResource *pbresource.Resource, + sendErr bool) { + + expectedId := expectedResource.GetId() + + if sendErr { + mockClient.Delete(mock.Anything, mock.Anything). + Return(nil, fakeWrappedErr). + Once(). + Run(func(args mock.Arguments) { + req := args.Get(1).(*pbresource.DeleteRequest) + require.True(t, proto.Equal(expectedId, req.Id)) + }) + } else { + mockClient.Delete(mock.Anything, mock.Anything). + Return(nil, nil). + Once(). + Run(func(args mock.Arguments) { + req := args.Get(1).(*pbresource.DeleteRequest) + require.True(t, proto.Equal(expectedId, req.Id)) + }) + } +} + +func getTestWorkload(t *testing.T) *pbresource.Resource { + return getTestWorkloadWithPort(t, 8300) +} + +func getTestWorkloadWithPort(t *testing.T, port int) *pbresource.Resource { + workload := &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "127.0.0.1", Ports: []string{consulPortNameServer}}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + consulPortNameServer: { + Port: uint32(port), + Protocol: pbcatalog.Protocol_PROTOCOL_TCP, + }, + }, + } + data, err := anypb.New(workload) + require.NoError(t, err) + + return &pbresource.Resource{ + Id: getTestWorkloadId(), + Data: data, + Metadata: map[string]string{ + "read_replica": "false", + "raft_version": "3", + "serf_protocol_current": "2", + "serf_protocol_min": "1", + "serf_protocol_max": "5", + "version": "1.18.0", + "grpc_port": "8502", + }, + } +} + +func getTestWorkloadId() *pbresource.ID { + 
return &pbresource.ID{
+		Tenancy: resource.DefaultNamespacedTenancy(),
+		Type:    pbcatalog.WorkloadType,
+		Name:    "consul-server-72af047d-1857-2493-969e-53614a70b25a",
+	}
+}
+
+func getTestHealthStatus(t *testing.T, passing bool) *pbresource.Resource {
+	healthStatus := &pbcatalog.HealthStatus{
+		Type:        string(structs.SerfCheckID),
+		Description: structs.SerfCheckName,
+	}
+
+	if passing {
+		healthStatus.Status = pbcatalog.Health_HEALTH_PASSING
+		healthStatus.Output = structs.SerfCheckAliveOutput
+	} else {
+		healthStatus.Status = pbcatalog.Health_HEALTH_CRITICAL
+		healthStatus.Output = structs.SerfCheckFailedOutput
+	}
+
+	data, err := anypb.New(healthStatus)
+	require.NoError(t, err)
+
+	return &pbresource.Resource{
+		Id:    getTestHealthstatusId(),
+		Data:  data,
+		Owner: getTestWorkloadId(),
+	}
+}
+
+func getTestHealthstatusId() *pbresource.ID {
+	return &pbresource.ID{
+		Tenancy: resource.DefaultNamespacedTenancy(),
+		Type:    pbcatalog.HealthStatusType,
+		Name:    "consul-server-72af047d-1857-2493-969e-53614a70b25a",
+	}
+}
+
+func getTestSerfMember(status serf.MemberStatus) serf.Member {
+	return serf.Member{
+		Name: "test-server-1",
+		Addr: net.ParseIP("127.0.0.1"),
+		Port: 8300,
+		// representative tags from a local dev deployment of ENT
+		Tags: map[string]string{
+			"vsn_min":       "2",
+			"vsn":           "2",
+			"acls":          "1",
+			"ft_si":         "1",
+			"raft_vsn":      "3",
+			"grpc_port":     "8502",
+			"wan_join_port": "8500",
+			"dc":            "dc1",
+			"segment":       "",
+			"id":            "72af047d-1857-2493-969e-53614a70b25a",
+			"ft_admpart":    "1",
+			"role":          "consul",
+			"build":         "1.18.0",
+			"ft_ns":         "1",
+			"vsn_max":       "3",
+			"bootstrap":     "1",
+			"expect":        "1",
+			"port":          "8300",
+		},
+		Status:      status,
+		ProtocolMin: 1,
+		ProtocolMax: 5,
+		ProtocolCur: 2,
+		DelegateMin: 2,
+		DelegateMax: 5,
+		DelegateCur: 4,
+	}
+}
+
+// Test_ResourceCmpOptions_GeneratedFieldInsensitive makes sure our protocmp options are working as expected.
+func Test_ResourceCmpOptions_GeneratedFieldInsensitive(t *testing.T) { + t.Parallel() + + res1 := getTestWorkload(t) + res2 := getTestWorkload(t) + + // Modify the generated fields + res2.Id.Uid = "123456" + res2.Version = "789" + res2.Generation = "millenial" + res2.Status = map[string]*pbresource.Status{ + "foo": {ObservedGeneration: "124"}, + } + + require.True(t, cmp.Equal(res1, res2, resourceCmpOptions...)) + + res1.Metadata["foo"] = "bar" + + require.False(t, cmp.Equal(res1, res2, resourceCmpOptions...)) +} + +// Test gRPC Error Codes Conditions +func Test_grpcNotFoundErr(t *testing.T) { + t.Parallel() + tests := []struct { + name string + err error + expected bool + }{ + { + name: "Nil Error", + }, + { + name: "Nonsense Error", + err: fmt.Errorf("boooooo!"), + }, + { + name: "gRPC Permission Denied Error", + err: status.Error(codes.PermissionDenied, "permission denied is not NotFound"), + }, + { + name: "gRPC NotFound Error", + err: status.Error(codes.NotFound, "bingo: not found"), + expected: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.expected, grpcNotFoundErr(tt.err)) + }) + } +} diff --git a/agent/consul/leader_test.go b/agent/consul/leader_test.go index 9709e391ebcf2..619d6ae6dae1a 100644 --- a/agent/consul/leader_test.go +++ b/agent/consul/leader_test.go @@ -14,23 +14,84 @@ import ( "testing" "time" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/serf/serf" "github.com/stretchr/testify/require" "google.golang.org/grpc" msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/serf/serf" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/leafcert" "github.com/hashicorp/consul/agent/structs" tokenStore "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/internal/resource" + 
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" ) +func enableV2(t *testing.T) func(deps *Deps) { + return func(deps *Deps) { + deps.Experiments = []string{"resource-apis"} + m, _ := leafcert.NewTestManager(t, nil) + deps.LeafCertManager = m + } +} + +// Test that Consul service is created in V2. +// In V1, the service is implicitly created - this is covered in leader_registrator_v1_test.go +func Test_InitConsulService(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + dir, s := testServerWithDepsAndConfig(t, enableV2(t), + func(c *Config) { + c.PrimaryDatacenter = "dc1" + c.ACLsEnabled = true + c.ACLInitialManagementToken = "root" + c.ACLResolverSettings.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir) + defer s.Shutdown() + + testrpc.WaitForRaftLeader(t, s.RPC, "dc1", testrpc.WithToken("root")) + + client := pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan) + + consulServiceID := &pbresource.ID{ + Name: structs.ConsulServiceName, + Type: pbcatalog.ServiceType, + Tenancy: resource.DefaultNamespacedTenancy(), + } + + retry.Run(t, func(r *retry.R) { + res, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: consulServiceID}) + if err != nil { + r.Fatalf("err: %v", err) + } + data := res.GetResource().GetData() + require.NotNil(r, data) + + var service pbcatalog.Service + err = data.UnmarshalTo(&service) + require.NoError(r, err) + + // Spot check the Service + require.Equal(r, service.GetWorkloads().GetPrefixes(), []string{consulWorkloadPrefix}) + require.GreaterOrEqual(r, len(service.GetPorts()), 1) + + //Since we're not running a full agent w/ serf, we can't check for valid endpoints + }) +} + func TestLeader_TombstoneGC_Reset(t *testing.T) { if testing.Short() { 
t.Skip("too slow for testing.Short") @@ -773,7 +834,7 @@ func TestLeader_ConfigEntryBootstrap_Fail(t *testing.T) { deps := newDefaultDeps(t, config) deps.Logger = logger - srv, err := NewServer(config, deps, grpc.NewServer(), nil, logger) + srv, err := NewServer(config, deps, grpc.NewServer(), nil, logger, nil) require.NoError(t, err) defer srv.Shutdown() diff --git a/agent/consul/options.go b/agent/consul/options.go index 8c9fe05f48734..ced36bcad591b 100644 --- a/agent/consul/options.go +++ b/agent/consul/options.go @@ -6,6 +6,8 @@ package consul import ( "google.golang.org/grpc" + "github.com/hashicorp/consul/lib/stringslice" + "github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/hashicorp/go-hclog" @@ -48,6 +50,33 @@ type Deps struct { EnterpriseDeps } +// UseV2Resources returns true if "resource-apis" is present in the Experiments +// array of the agent config. +func (d Deps) UseV2Resources() bool { + if stringslice.Contains(d.Experiments, CatalogResourceExperimentName) { + return true + } + return false +} + +// UseV2Tenancy returns true if "v2tenancy" is present in the Experiments +// array of the agent config. +func (d Deps) UseV2Tenancy() bool { + if stringslice.Contains(d.Experiments, V2TenancyExperimentName) { + return true + } + return false +} + +// HCPAllowV2Resources returns true if "hcp-v2-resource-apis" is present in the Experiments +// array of the agent config. 
+func (d Deps) HCPAllowV2Resources() bool { + if stringslice.Contains(d.Experiments, HCPAllowV2ResourceAPIs) { + return true + } + return false +} + type GRPCClientConner interface { ClientConn(datacenter string) (*grpc.ClientConn, error) ClientConnLeader() (*grpc.ClientConn, error) diff --git a/agent/consul/server.go b/agent/consul/server.go index 979d9e3cd4341..12386cc9df09f 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -63,18 +63,25 @@ import ( "github.com/hashicorp/consul/agent/rpc/peering" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" + "github.com/hashicorp/consul/internal/auth" + "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/controller" "github.com/hashicorp/consul/internal/gossip/librtt" hcpctl "github.com/hashicorp/consul/internal/hcp" + "github.com/hashicorp/consul/internal/mesh" + proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" "github.com/hashicorp/consul/internal/multicluster" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" "github.com/hashicorp/consul/internal/resource/reaper" "github.com/hashicorp/consul/internal/storage" raftstorage "github.com/hashicorp/consul/internal/storage/raft" + "github.com/hashicorp/consul/internal/tenancy" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib/routine" + "github.com/hashicorp/consul/lib/stringslice" "github.com/hashicorp/consul/logging" + "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1/pbproxystate" "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/tlsutil" "github.com/hashicorp/consul/types" @@ -124,9 +131,25 @@ const ( // and wait for a periodic reconcile. 
reconcileChSize = 256 - LeaderTransferMinVersion = "1.6.0" + LeaderTransferMinVersion = "1.6.0" + CatalogResourceExperimentName = "resource-apis" + V2TenancyExperimentName = "v2tenancy" + HCPAllowV2ResourceAPIs = "hcp-v2-resource-apis" ) +// IsExperimentAllowedOnSecondaries returns true if an experiment is currently +// disallowed for wan federated secondary datacenters. +// +// Likely these will all be short lived exclusions. +func IsExperimentAllowedOnSecondaries(name string) bool { + switch name { + case CatalogResourceExperimentName, V2TenancyExperimentName: + return false + default: + return true + } +} + const ( aclPolicyReplicationRoutineName = "ACL policy replication" aclRoleReplicationRoutineName = "ACL role replication" @@ -451,6 +474,15 @@ type Server struct { reportingManager *reporting.ReportingManager registry resource.Registry + + useV2Resources bool + + // useV2Tenancy is tied to the "v2tenancy" feature flag. + useV2Tenancy bool + + // whether v2 resources are enabled for use with HCP + // TODO(CC-6389): Remove once resource-apis is no longer considered experimental and is supported by HCP + hcpAllowV2Resources bool } func (s *Server) DecrementBlockingQueries() uint64 { @@ -472,10 +504,22 @@ type connHandler interface { Shutdown() error } +// ProxyUpdater is an interface for ProxyTracker. +type ProxyUpdater interface { + // PushChange allows pushing a computed ProxyState to xds for xds resource generation to send to a proxy. + PushChange(id *pbresource.ID, snapshot proxysnapshot.ProxySnapshot) error + + // ProxyConnectedToServer returns whether this id is connected to this server. If it is connected, it also returns + // the token as the first argument. + ProxyConnectedToServer(id *pbresource.ID) (string, bool) + + EventChannel() chan controller.Event +} + // NewServer is used to construct a new Consul server from the configuration // and extra options, potentially returning an error. 
func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, - incomingRPCLimiter rpcRate.RequestLimitsHandler, serverLogger hclog.InterceptLogger) (*Server, error) { + incomingRPCLimiter rpcRate.RequestLimitsHandler, serverLogger hclog.InterceptLogger, proxyUpdater ProxyUpdater) (*Server, error) { logger := flat.Logger if err := config.CheckProtocolVersion(); err != nil { return nil, err @@ -528,6 +572,9 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, incomingRPCLimiter: incomingRPCLimiter, routineManager: routine.NewManager(logger.Named(logging.ConsulServer)), registry: flat.Registry, + useV2Resources: flat.UseV2Resources(), + useV2Tenancy: flat.UseV2Tenancy(), + hcpAllowV2Resources: flat.HCPAllowV2Resources(), } incomingRPCLimiter.Register(s) @@ -589,7 +636,15 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, rpcServerOpts := []func(*rpc.Server){ rpc.WithPreBodyInterceptor( - middleware.GetNetRPCRateLimitingInterceptor(s.incomingRPCLimiter, middleware.NewPanicHandler(s.logger)), + middleware.ChainedRPCPreBodyInterceptor( + func(reqServiceMethod string, sourceAddr net.Addr) error { + if s.useV2Resources && isV1CatalogRequest(reqServiceMethod) { + return structs.ErrUsingV2CatalogExperiment + } + return nil + }, + middleware.GetNetRPCRateLimitingInterceptor(s.incomingRPCLimiter, middleware.NewPanicHandler(s.logger)), + ), ), } @@ -692,7 +747,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, } // Initialize the Raft server. 
- if err := s.setupRaft(); err != nil { + if err := s.setupRaft(stringslice.Contains(flat.Experiments, CatalogResourceExperimentName)); err != nil { s.Shutdown() return nil, fmt.Errorf("Failed to start Raft: %v", err) } @@ -870,7 +925,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, pbresource.NewResourceServiceClient(s.insecureUnsafeGRPCChan), s.loggers.Named(logging.ControllerRuntime), ) - if err := s.registerControllers(flat); err != nil { + if err := s.registerControllers(flat, proxyUpdater); err != nil { return nil, err } go s.controllerManager.Run(&lib.StopChannelContext{StopCh: shutdownCh}) @@ -888,12 +943,22 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, // as establishing leadership could attempt to use autopilot and cause a panic. s.initAutopilot(config) - s.registrator = V1ConsulRegistrator{ - Datacenter: s.config.Datacenter, - FSM: s.fsm, - Logger: serverLogger, - NodeName: s.config.NodeName, - RaftApplyFunc: s.raftApplyMsgpack, + // Construct the registrator that makes sense for the catalog version + if s.useV2Resources { + s.registrator = V2ConsulRegistrator{ + Logger: serverLogger, + NodeName: s.config.NodeName, + EntMeta: s.config.AgentEnterpriseMeta(), + Client: pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan), + } + } else { + s.registrator = V1ConsulRegistrator{ + Datacenter: s.config.Datacenter, + FSM: s.fsm, + Logger: serverLogger, + NodeName: s.config.NodeName, + RaftApplyFunc: s.raftApplyMsgpack, + } } // Start monitoring leadership. 
This must happen after Serf is set up @@ -928,17 +993,86 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, return s, nil } -func (s *Server) registerControllers(deps Deps) error { +func isV1CatalogRequest(rpcName string) bool { + switch { + case strings.HasPrefix(rpcName, "Catalog."), + strings.HasPrefix(rpcName, "Health."), + strings.HasPrefix(rpcName, "ConfigEntry."): + return true + } + + switch rpcName { + case "Internal.EventFire", "Internal.KeyringOperation", "Internal.OIDCAuthMethods": + return false + default: + if strings.HasPrefix(rpcName, "Internal.") { + return true + } + return false + } +} + +func (s *Server) registerControllers(deps Deps, proxyUpdater ProxyUpdater) error { if s.config.Cloud.IsConfigured() { hcpctl.RegisterControllers( s.controllerManager, hcpctl.ControllerDependencies{ - CloudConfig: deps.HCP.Config, + ResourceApisEnabled: s.useV2Resources, + HCPAllowV2ResourceApis: s.hcpAllowV2Resources, + CloudConfig: deps.HCP.Config, }, ) } - shim := NewExportedServicesShim(s) - multicluster.RegisterCompatControllers(s.controllerManager, multicluster.DefaultCompatControllerDependencies(shim)) + // When not enabled, the v1 tenancy bridge is used by default. 
+ if s.useV2Tenancy { + tenancy.RegisterControllers( + s.controllerManager, + tenancy.Dependencies{Registry: deps.Registry}, + ) + } + + if s.useV2Resources { + catalog.RegisterControllers(s.controllerManager) + defaultAllow, err := s.config.ACLResolverSettings.IsDefaultAllow() + if err != nil { + return err + } + + mesh.RegisterControllers(s.controllerManager, mesh.ControllerDependencies{ + TrustBundleFetcher: func() (*pbproxystate.TrustBundle, error) { + var bundle pbproxystate.TrustBundle + roots, err := s.getCARoots(nil, s.GetState()) + if err != nil { + return nil, err + } + bundle.TrustDomain = roots.TrustDomain + for _, root := range roots.Roots { + bundle.Roots = append(bundle.Roots, root.RootCert) + } + return &bundle, nil + }, + // This function is adapted from server_connect.go:getCARoots. + TrustDomainFetcher: func() (string, error) { + _, caConfig, err := s.fsm.State().CAConfig(nil) + if err != nil { + return "", err + } + + return s.getTrustDomain(caConfig) + }, + + LeafCertManager: deps.LeafCertManager, + LocalDatacenter: s.config.Datacenter, + DefaultAllow: defaultAllow, + ProxyUpdater: proxyUpdater, + }) + + auth.RegisterControllers(s.controllerManager, auth.DefaultControllerDependencies()) + multicluster.RegisterControllers(s.controllerManager) + } else { + shim := NewExportedServicesShim(s) + multicluster.RegisterCompatControllers(s.controllerManager, multicluster.DefaultCompatControllerDependencies(shim)) + } reaper.RegisterControllers(s.controllerManager) @@ -975,7 +1109,7 @@ func (s *Server) connectCARootsMonitor(ctx context.Context) { } // setupRaft is used to setup and initialize Raft -func (s *Server) setupRaft() error { +func (s *Server) setupRaft(isCatalogResourceExperiment bool) error { // If we have an unclean exit then attempt to close the Raft store. 
defer func() { if s.raft == nil && s.raftStore != nil { @@ -1056,7 +1190,7 @@ func (s *Server) setupRaft() error { return nil } // Only use WAL if there is no existing raft.db, even if it's enabled. - if s.config.LogStoreConfig.Backend == LogStoreBackendDefault && !boltFileExists { + if s.config.LogStoreConfig.Backend == LogStoreBackendDefault && !boltFileExists && isCatalogResourceExperiment { s.config.LogStoreConfig.Backend = LogStoreBackendWAL if !s.config.LogStoreConfig.Verification.Enabled { s.config.LogStoreConfig.Verification.Enabled = true diff --git a/agent/consul/server_ce.go b/agent/consul/server_ce.go index dae8dc1516c57..b744f2ec72b1d 100644 --- a/agent/consul/server_ce.go +++ b/agent/consul/server_ce.go @@ -205,5 +205,6 @@ func (s *Server) newResourceServiceConfig(typeRegistry resource.Registry, resolv ACLResolver: resolver, Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.Resource), TenancyBridge: tenancyBridge, + UseV2Tenancy: s.useV2Tenancy, } } diff --git a/agent/consul/server_grpc.go b/agent/consul/server_grpc.go index a190c44a05980..a4ff8660951b2 100644 --- a/agent/consul/server_grpc.go +++ b/agent/consul/server_grpc.go @@ -29,6 +29,8 @@ import ( "github.com/hashicorp/consul/agent/rpc/peering" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/tenancy" + "github.com/hashicorp/consul/lib/stringslice" "github.com/hashicorp/consul/logging" "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/proto/private/pbsubscribe" @@ -314,6 +316,7 @@ func (s *Server) setupGRPCServices(config *Config, deps Deps) error { // for anything internal in Consul to use the service. If that changes // we could register it on the in-process interfaces as well. 
err = s.registerDataplaneServer( + deps, s.externalGRPCServer, ) if err != nil { @@ -341,7 +344,20 @@ func (s *Server) registerResourceServiceServer(typeRegistry resource.Registry, r return fmt.Errorf("storage backend cannot be nil") } - tenancyBridge := NewV1TenancyBridge(s) + var tenancyBridge resourcegrpc.TenancyBridge + if s.useV2Tenancy { + tenancyBridge = tenancy.NewV2TenancyBridge().WithClient( + // This assumes that the resource service will be registered with + // the insecureUnsafeGRPCChan. We are using the insecure and unsafe + // channel here because the V2 Tenancy bridge only reads data + // from the client and does not modify it. Therefore sharing memory + // with the resource services canonical immutable data is advantageous + // to prevent wasting CPU time for every resource op to clone things. + pbresource.NewResourceServiceClient(s.insecureUnsafeGRPCChan), + ) + } else { + tenancyBridge = NewV1TenancyBridge(s) + } // Create the Resource Service Server srv := resourcegrpc.NewServer(s.newResourceServiceConfig(typeRegistry, resolver, tenancyBridge)) @@ -494,12 +510,14 @@ func (s *Server) registerConnectCAServer(registrars ...grpc.ServiceRegistrar) er return nil } -func (s *Server) registerDataplaneServer(registrars ...grpc.ServiceRegistrar) error { +func (s *Server) registerDataplaneServer(deps Deps, registrars ...grpc.ServiceRegistrar) error { srv := dataplane.NewServer(dataplane.Config{ - GetStore: func() dataplane.StateStore { return s.FSM().State() }, - Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.Dataplane), - ACLResolver: s.ACLResolver, - Datacenter: s.config.Datacenter, + GetStore: func() dataplane.StateStore { return s.FSM().State() }, + Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.Dataplane), + ACLResolver: s.ACLResolver, + Datacenter: s.config.Datacenter, + EnableV2: stringslice.Contains(deps.Experiments, CatalogResourceExperimentName), + ResourceAPIClient: pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan), }) 
for _, reg := range registrars { diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index f157fa6dd517c..e685f25ca4442 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -19,6 +19,10 @@ import ( "github.com/armon/go-metrics" "github.com/google/tcpproxy" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/memberlist" + "github.com/hashicorp/raft" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "golang.org/x/time/rate" @@ -26,10 +30,6 @@ import ( "google.golang.org/grpc/keepalive" "github.com/hashicorp/consul-net-rpc/net/rpc" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/memberlist" - "github.com/hashicorp/raft" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/multilimiter" @@ -43,6 +43,7 @@ import ( "github.com/hashicorp/consul/agent/rpc/middleware" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" + proxytracker "github.com/hashicorp/consul/internal/mesh/proxy-tracker" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/sdk/freeport" "github.com/hashicorp/consul/sdk/testutil" @@ -351,7 +352,8 @@ func newServerWithDeps(t testutil.TestingTB, c *Config, deps Deps) (*Server, err } } grpcServer := external.NewServer(deps.Logger.Named("grpc.external"), nil, deps.TLSConfigurator, rpcRate.NullRequestLimitsHandler(), keepalive.ServerParameters{}, nil) - srv, err := NewServer(c, deps, grpcServer, nil, deps.Logger) + proxyUpdater := proxytracker.NewProxyTracker(proxytracker.ProxyTrackerConfig{}) + srv, err := NewServer(c, deps, grpcServer, nil, deps.Logger, proxyUpdater) if err != nil { return nil, err } @@ -1258,7 +1260,7 @@ func TestServer_RPC_MetricsIntercept_Off(t *testing.T) { } } - s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger) + s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger, nil) if err 
!= nil { t.Fatalf("err: %v", err) } @@ -1296,7 +1298,7 @@ func TestServer_RPC_MetricsIntercept_Off(t *testing.T) { return nil } - s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger) + s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger, nil) if err != nil { t.Fatalf("err: %v", err) } @@ -1330,7 +1332,7 @@ func TestServer_RPC_RequestRecorder(t *testing.T) { deps := newDefaultDeps(t, conf) deps.NewRequestRecorderFunc = nil - s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger) + s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger, nil) require.Error(t, err, "need err when provider func is nil") require.Equal(t, err.Error(), "cannot initialize server without an RPC request recorder provider") @@ -1349,7 +1351,7 @@ func TestServer_RPC_RequestRecorder(t *testing.T) { return nil } - s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger) + s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger, nil) require.Error(t, err, "need err when RequestRecorder is nil") require.Equal(t, err.Error(), "cannot initialize server with a nil RPC request recorder") @@ -2313,7 +2315,7 @@ func TestServer_ControllerDependencies(t *testing.T) { _, conf := testServerConfig(t) deps := newDefaultDeps(t, conf) - deps.Experiments = []string{"resource-apis"} + deps.Experiments = []string{"resource-apis", "v2tenancy"} deps.LeafCertManager = &leafcert.Manager{} s1, err := newServerWithDeps(t, conf, deps) @@ -2323,10 +2325,6 @@ func TestServer_ControllerDependencies(t *testing.T) { // gotest.tools/v3 defines CLI flags which are incompatible wit the golden package // Once we eliminate gotest.tools/v3 from usage within Consul we could uncomment this // actual := fmt.Sprintf("```mermaid\n%s\n```", s1.controllerManager.CalculateDependencies(s1.registry.Types()).ToMermaid()) - // markdownFileName := "v2-resource-dependencies" - // if versiontest.IsEnterprise() { - // markdownFileName += "-enterprise" - // } - // 
expected := golden.Get(t, actual, markdownFileName) + // expected := golden.Get(t, actual, "v2-resource-dependencies") // require.Equal(t, expected, actual) } diff --git a/agent/consul/testdata/v2-resource-dependencies.md b/agent/consul/testdata/v2-resource-dependencies.md index 7bcb0d55c4f47..e394247866a42 100644 --- a/agent/consul/testdata/v2-resource-dependencies.md +++ b/agent/consul/testdata/v2-resource-dependencies.md @@ -1,5 +1,24 @@ ```mermaid flowchart TD + auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/namespacetrafficpermissions + auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/partitiontrafficpermissions + auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/trafficpermissions + auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/workloadidentity + auth/v2beta1/namespacetrafficpermissions + auth/v2beta1/partitiontrafficpermissions + auth/v2beta1/trafficpermissions + auth/v2beta1/workloadidentity + catalog/v2beta1/computedfailoverpolicy --> catalog/v2beta1/failoverpolicy + catalog/v2beta1/computedfailoverpolicy --> catalog/v2beta1/service + catalog/v2beta1/failoverpolicy + catalog/v2beta1/healthstatus + catalog/v2beta1/node --> catalog/v2beta1/nodehealthstatus + catalog/v2beta1/nodehealthstatus + catalog/v2beta1/service + catalog/v2beta1/serviceendpoints --> catalog/v2beta1/service + catalog/v2beta1/serviceendpoints --> catalog/v2beta1/workload + catalog/v2beta1/workload --> catalog/v2beta1/healthstatus + catalog/v2beta1/workload --> catalog/v2beta1/node demo/v1/album demo/v1/artist demo/v1/concept @@ -8,12 +27,42 @@ flowchart TD demo/v2/album demo/v2/artist hcp/v2/link - hcp/v2/telemetrystate + hcp/v2/telemetrystate --> hcp/v2/link internal/v1/tombstone + mesh/v2beta1/computedexplicitdestinations --> catalog/v2beta1/service + mesh/v2beta1/computedexplicitdestinations --> catalog/v2beta1/workload + mesh/v2beta1/computedexplicitdestinations --> mesh/v2beta1/computedroutes + mesh/v2beta1/computedexplicitdestinations --> 
mesh/v2beta1/destinations + mesh/v2beta1/computedproxyconfiguration --> catalog/v2beta1/workload + mesh/v2beta1/computedproxyconfiguration --> mesh/v2beta1/proxyconfiguration + mesh/v2beta1/computedroutes --> catalog/v2beta1/computedfailoverpolicy + mesh/v2beta1/computedroutes --> catalog/v2beta1/service + mesh/v2beta1/computedroutes --> mesh/v2beta1/destinationpolicy + mesh/v2beta1/computedroutes --> mesh/v2beta1/grpcroute + mesh/v2beta1/computedroutes --> mesh/v2beta1/httproute + mesh/v2beta1/computedroutes --> mesh/v2beta1/tcproute + mesh/v2beta1/destinationpolicy + mesh/v2beta1/destinations + mesh/v2beta1/grpcroute + mesh/v2beta1/httproute + mesh/v2beta1/meshconfiguration + mesh/v2beta1/meshgateway + mesh/v2beta1/proxyconfiguration + mesh/v2beta1/proxystatetemplate --> auth/v2beta1/computedtrafficpermissions + mesh/v2beta1/proxystatetemplate --> catalog/v2beta1/service + mesh/v2beta1/proxystatetemplate --> catalog/v2beta1/serviceendpoints + mesh/v2beta1/proxystatetemplate --> catalog/v2beta1/workload + mesh/v2beta1/proxystatetemplate --> mesh/v2beta1/computedexplicitdestinations + mesh/v2beta1/proxystatetemplate --> mesh/v2beta1/computedproxyconfiguration + mesh/v2beta1/proxystatetemplate --> mesh/v2beta1/computedroutes + mesh/v2beta1/proxystatetemplate --> multicluster/v2/computedexportedservices + mesh/v2beta1/tcproute + multicluster/v2/computedexportedservices --> catalog/v2beta1/service multicluster/v2/computedexportedservices --> multicluster/v2/exportedservices multicluster/v2/computedexportedservices --> multicluster/v2/namespaceexportedservices multicluster/v2/computedexportedservices --> multicluster/v2/partitionexportedservices multicluster/v2/exportedservices multicluster/v2/namespaceexportedservices multicluster/v2/partitionexportedservices + tenancy/v2beta1/namespace ``` \ No newline at end of file diff --git a/agent/consul/type_registry.go b/agent/consul/type_registry.go index cd2087e48f12e..450cef7e059a9 100644 --- a/agent/consul/type_registry.go 
+++ b/agent/consul/type_registry.go @@ -4,10 +4,14 @@ package consul import ( + "github.com/hashicorp/consul/internal/auth" + "github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/hcp" + "github.com/hashicorp/consul/internal/mesh" "github.com/hashicorp/consul/internal/multicluster" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" + "github.com/hashicorp/consul/internal/tenancy" ) // NewTypeRegistry returns a registry populated with all supported resource @@ -21,6 +25,10 @@ func NewTypeRegistry() resource.Registry { registry := resource.NewRegistry() demo.RegisterTypes(registry) + mesh.RegisterTypes(registry) + catalog.RegisterTypes(registry) + auth.RegisterTypes(registry) + tenancy.RegisterTypes(registry) multicluster.RegisterTypes(registry) hcp.RegisterTypes(registry) diff --git a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go index bbc2390a776b1..ea4852efab2be 100644 --- a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go +++ b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go @@ -8,12 +8,16 @@ import ( "errors" "strings" + "github.com/hashicorp/go-hclog" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/types/known/structpb" - "github.com/hashicorp/go-hclog" + "github.com/hashicorp/consul/internal/resource" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/configentry" @@ -46,6 +50,72 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G return nil, status.Error(codes.Unauthenticated, err.Error()) } + if s.EnableV2 { + // 
Get the workload. + workloadId := &pbresource.ID{ + Name: proxyID, + Tenancy: &pbresource.Tenancy{ + Namespace: req.Namespace, + Partition: req.Partition, + }, + Type: pbcatalog.WorkloadType, + } + workloadRsp, err := s.ResourceAPIClient.Read(ctx, &pbresource.ReadRequest{ + Id: workloadId, + }) + if err != nil { + // This error should already include the gRPC status code and so we don't need to wrap it + // in status.Error. + logger.Error("Error looking up workload", "error", err) + return nil, err + } + var workload pbcatalog.Workload + err = workloadRsp.Resource.Data.UnmarshalTo(&workload) + if err != nil { + return nil, status.Error(codes.Internal, "failed to parse workload data") + } + + // Only workloads that have an associated identity can ask for proxy bootstrap parameters. + if workload.Identity == "" { + return nil, status.Errorf(codes.InvalidArgument, "workload %q doesn't have identity associated with it", req.ProxyId) + } + + // verify identity:write is allowed. if not, give permission denied error. 
+ if err := authz.ToAllowAuthorizer().IdentityWriteAllowed(workload.Identity, &authzContext); err != nil { + return nil, err + } + + computedProxyConfig, err := resource.GetDecodedResource[*pbmesh.ComputedProxyConfiguration]( + ctx, + s.ResourceAPIClient, + resource.ReplaceType(pbmesh.ComputedProxyConfigurationType, workloadId)) + + if err != nil { + logger.Error("Error looking up ComputedProxyConfiguration for this workload", "error", err) + return nil, err + } + + rsp := &pbdataplane.GetEnvoyBootstrapParamsResponse{ + Identity: workload.Identity, + Partition: workloadRsp.Resource.Id.Tenancy.Partition, + Namespace: workloadRsp.Resource.Id.Tenancy.Namespace, + Datacenter: s.Datacenter, + NodeName: workload.NodeName, + } + + if computedProxyConfig != nil { + if computedProxyConfig.GetData().GetDynamicConfig() != nil { + rsp.AccessLogs = makeAccessLogs(computedProxyConfig.GetData().GetDynamicConfig().GetAccessLogs(), logger) + } + + rsp.BootstrapConfig = computedProxyConfig.GetData().GetBootstrapConfig() + } + + return rsp, nil + } + + // The remainder of this file focuses on v1 implementation of this endpoint. 
+ store := s.GetStore() _, svc, err := store.ServiceNode(req.GetNodeId(), req.GetNodeName(), proxyID, &entMeta, structs.DefaultPeerKeyword) @@ -111,9 +181,9 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G }, nil } -func makeAccessLogs(logs *structs.AccessLogsConfig, logger hclog.Logger) []string { +func makeAccessLogs(logs structs.AccessLogs, logger hclog.Logger) []string { var accessLogs []string - if logs.Enabled { + if logs.GetEnabled() { envoyLoggers, err := accesslogs.MakeAccessLogs(logs, false) if err != nil { logger.Warn("Error creating the envoy access log config", "error", err) diff --git a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go index bcff21cce50be..2a50094029076 100644 --- a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go +++ b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go @@ -18,9 +18,18 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl/resolver" external "github.com/hashicorp/consul/agent/grpc-external" + svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" "github.com/hashicorp/consul/agent/grpc-external/testutils" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/mesh" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" "github.com/hashicorp/consul/proto-public/pbdataplane" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/proto/private/prototest" ) const ( @@ -243,6 +252,156 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) { } } +func 
TestGetEnvoyBootstrapParams_Success_EnableV2(t *testing.T) { + type testCase struct { + name string + workloadData *pbcatalog.Workload + proxyCfg *pbmesh.ComputedProxyConfiguration + expBootstrapCfg *pbmesh.BootstrapConfig + expAccessLogs string + } + + run := func(t *testing.T, tc testCase) { + resourceClient := svctest.NewResourceServiceBuilder(). + WithRegisterFns(catalog.RegisterTypes, mesh.RegisterTypes). + Run(t) + + options := structs.QueryOptions{Token: testToken} + ctx, err := external.ContextWithQueryOptions(context.Background(), options) + require.NoError(t, err) + + aclResolver := &MockACLResolver{} + + server := NewServer(Config{ + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + Datacenter: serverDC, + EnableV2: true, + ResourceAPIClient: resourceClient, + }) + client := testClient(t, server) + + // Add required fields to workload data. + tc.workloadData.Addresses = []*pbcatalog.WorkloadAddress{ + { + Host: "127.0.0.1", + }, + } + tc.workloadData.Ports = map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, + } + workloadResource := resourcetest.Resource(pbcatalog.WorkloadType, "test-workload"). + WithData(t, tc.workloadData). + WithTenancy(resource.DefaultNamespacedTenancy()). + Write(t, resourceClient) + + // Create computed proxy cfg resource. + resourcetest.Resource(pbmesh.ComputedProxyConfigurationType, workloadResource.Id.Name). + WithData(t, tc.proxyCfg). + WithTenancy(resource.DefaultNamespacedTenancy()). + Write(t, resourceClient) + + req := &pbdataplane.GetEnvoyBootstrapParamsRequest{ + ProxyId: workloadResource.Id.Name, + Namespace: workloadResource.Id.Tenancy.Namespace, + Partition: workloadResource.Id.Tenancy.Partition, + } + + aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything). 
+ Return(testutils.ACLUseProvidedPolicy(t, + &acl.Policy{ + PolicyRules: acl.PolicyRules{ + Services: []*acl.ServiceRule{ + { + Name: workloadResource.Id.Name, + Policy: acl.PolicyRead, + }, + }, + Identities: []*acl.IdentityRule{ + { + Name: testIdentity, + Policy: acl.PolicyWrite, + }, + }, + }, + }), nil) + + resp, err := client.GetEnvoyBootstrapParams(ctx, req) + require.NoError(t, err) + + require.Equal(t, tc.workloadData.Identity, resp.Identity) + require.Equal(t, serverDC, resp.Datacenter) + require.Equal(t, workloadResource.Id.Tenancy.Partition, resp.Partition) + require.Equal(t, workloadResource.Id.Tenancy.Namespace, resp.Namespace) + require.Equal(t, resp.NodeName, tc.workloadData.NodeName) + prototest.AssertDeepEqual(t, tc.expBootstrapCfg, resp.BootstrapConfig) + if tc.expAccessLogs != "" { + require.JSONEq(t, tc.expAccessLogs, resp.AccessLogs[0]) + } + } + + testCases := []testCase{ + { + name: "workload without node", + workloadData: &pbcatalog.Workload{ + Identity: testIdentity, + }, + expBootstrapCfg: nil, + }, + { + name: "workload with node", + workloadData: &pbcatalog.Workload{ + Identity: testIdentity, + NodeName: "test-node", + }, + expBootstrapCfg: nil, + }, + { + name: "single proxy configuration", + workloadData: &pbcatalog.Workload{ + Identity: testIdentity, + }, + proxyCfg: &pbmesh.ComputedProxyConfiguration{ + BootstrapConfig: &pbmesh.BootstrapConfig{ + DogstatsdUrl: "dogstats-url", + }, + }, + expBootstrapCfg: &pbmesh.BootstrapConfig{ + DogstatsdUrl: "dogstats-url", + }, + }, + { + name: "multiple proxy configurations", + workloadData: &pbcatalog.Workload{ + Identity: testIdentity, + }, + proxyCfg: &pbmesh.ComputedProxyConfiguration{ + BootstrapConfig: &pbmesh.BootstrapConfig{ + DogstatsdUrl: "dogstats-url", + StatsdUrl: "stats-url", + }, + DynamicConfig: &pbmesh.DynamicConfig{ + AccessLogs: &pbmesh.AccessLogsConfig{ + Enabled: true, + JsonFormat: "{ \"custom_field\": \"%START_TIME%\" }", + }, + }, + }, + expBootstrapCfg: 
&pbmesh.BootstrapConfig{ + DogstatsdUrl: "dogstats-url", + StatsdUrl: "stats-url", + }, + expAccessLogs: testAccessLogs, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + func TestGetEnvoyBootstrapParams_Error(t *testing.T) { type testCase struct { name string @@ -324,6 +483,100 @@ func TestGetEnvoyBootstrapParams_Error(t *testing.T) { } +func TestGetEnvoyBootstrapParams_Error_EnableV2(t *testing.T) { + type testCase struct { + name string + expectedErrCode codes.Code + expecteErrMsg string + workload *pbresource.Resource + } + + run := func(t *testing.T, tc testCase) { + resourceClient := svctest.NewResourceServiceBuilder(). + WithRegisterFns(catalog.RegisterTypes, mesh.RegisterTypes). + Run(t) + + options := structs.QueryOptions{Token: testToken} + ctx, err := external.ContextWithQueryOptions(context.Background(), options) + require.NoError(t, err) + + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything). + Return(testutils.ACLServiceRead(t, "doesn't matter"), nil) + + server := NewServer(Config{ + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + Datacenter: serverDC, + EnableV2: true, + ResourceAPIClient: resourceClient, + }) + client := testClient(t, server) + + var req pbdataplane.GetEnvoyBootstrapParamsRequest + // Write the workload resource. 
+ if tc.workload != nil { + _, err = resourceClient.Write(context.Background(), &pbresource.WriteRequest{ + Resource: tc.workload, + }) + require.NoError(t, err) + + req = pbdataplane.GetEnvoyBootstrapParamsRequest{ + ProxyId: tc.workload.Id.Name, + Namespace: tc.workload.Id.Tenancy.Namespace, + Partition: tc.workload.Id.Tenancy.Partition, + } + } else { + req = pbdataplane.GetEnvoyBootstrapParamsRequest{ + ProxyId: "not-found", + Namespace: "default", + Partition: "default", + } + } + + resp, err := client.GetEnvoyBootstrapParams(ctx, &req) + require.Nil(t, resp) + require.Error(t, err) + errStatus, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, tc.expectedErrCode.String(), errStatus.Code().String()) + require.Equal(t, tc.expecteErrMsg, errStatus.Message()) + } + + workload := resourcetest.Resource(pbcatalog.WorkloadType, "test-workload"). + WithData(t, &pbcatalog.Workload{ + Addresses: []*pbcatalog.WorkloadAddress{ + {Host: "127.0.0.1"}, + }, + Ports: map[string]*pbcatalog.WorkloadPort{ + "tcp": {Port: 8080}, + }, + }). + WithTenancy(resource.DefaultNamespacedTenancy()). + Build() + + testCases := []testCase{ + { + name: "workload doesn't exist", + expectedErrCode: codes.NotFound, + expecteErrMsg: "resource not found", + }, + { + name: "workload without identity", + expectedErrCode: codes.InvalidArgument, + expecteErrMsg: "workload \"test-workload\" doesn't have identity associated with it", + workload: workload, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } + +} + func TestGetEnvoyBootstrapParams_Unauthenticated(t *testing.T) { // Mock the ACL resolver to return ErrNotFound. 
aclResolver := &MockACLResolver{} diff --git a/agent/grpc-external/services/dataplane/server.go b/agent/grpc-external/services/dataplane/server.go index 68972ce252247..3a1809cc048d5 100644 --- a/agent/grpc-external/services/dataplane/server.go +++ b/agent/grpc-external/services/dataplane/server.go @@ -4,6 +4,7 @@ package dataplane import ( + "github.com/hashicorp/consul/proto-public/pbresource" "google.golang.org/grpc" "github.com/hashicorp/go-hclog" @@ -26,6 +27,10 @@ type Config struct { ACLResolver ACLResolver // Datacenter of the Consul server this gRPC server is hosted on Datacenter string + + // EnableV2 indicates whether a feature flag for v2 APIs is provided. + EnableV2 bool + ResourceAPIClient pbresource.ResourceServiceClient } type StateStore interface { diff --git a/agent/grpc-external/services/resource/delete.go b/agent/grpc-external/services/resource/delete.go index 839bc7fa704ad..dbfdf07edb00f 100644 --- a/agent/grpc-external/services/resource/delete.go +++ b/agent/grpc-external/services/resource/delete.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/proto-public/pbresource" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" ) // Delete deletes a resource. 
@@ -199,10 +200,17 @@ func (s *Server) ensureDeleteRequestValid(req *pbresource.DeleteRequest) (*resou return nil, err } + if err = checkV2Tenancy(s.UseV2Tenancy, req.Id.Type); err != nil { + return nil, err + } + if err := validateScopedTenancy(reg.Scope, reg.Type, req.Id.Tenancy, false); err != nil { return nil, err } + if err := blockBuiltinsDeletion(reg.Type, req.Id); err != nil { + return nil, err + } return reg, nil } @@ -212,3 +220,12 @@ func TombstoneNameFor(deleteId *pbresource.ID) string { // deleteId.Name is just included for easier identification return fmt.Sprintf("tombstone-%v-%v", deleteId.Name, strings.ToLower(deleteId.Uid)) } + +func blockDefaultNamespaceDeletion(rtype *pbresource.Type, id *pbresource.ID) error { + if id.Name == resource.DefaultNamespaceName && + id.Tenancy.Partition == resource.DefaultPartitionName && + resource.EqualType(rtype, pbtenancy.NamespaceType) { + return status.Errorf(codes.InvalidArgument, "cannot delete default namespace") + } + return nil +} diff --git a/agent/grpc-external/services/resource/delete_ce.go b/agent/grpc-external/services/resource/delete_ce.go new file mode 100644 index 0000000000000..d2ff805a24a47 --- /dev/null +++ b/agent/grpc-external/services/resource/delete_ce.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !consulent + +package resource + +import "github.com/hashicorp/consul/proto-public/pbresource" + +func blockBuiltinsDeletion(rtype *pbresource.Type, id *pbresource.ID) error { + if err := blockDefaultNamespaceDeletion(rtype, id); err != nil { + return err + } + return nil +} diff --git a/agent/grpc-external/services/resource/delete_test.go b/agent/grpc-external/services/resource/delete_test.go index 25a8012051886..76403bb4d6baa 100644 --- a/agent/grpc-external/services/resource/delete_test.go +++ b/agent/grpc-external/services/resource/delete_test.go @@ -5,6 +5,7 @@ package resource_test import ( "context" + "fmt" "strings" "testing" @@ -21,6 +22,7 @@ import ( "github.com/hashicorp/consul/internal/resource/demo" rtest "github.com/hashicorp/consul/internal/resource/resourcetest" "github.com/hashicorp/consul/proto-public/pbresource" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" pbdemo "github.com/hashicorp/consul/proto/private/pbdemo/v1" ) @@ -135,28 +137,37 @@ func TestDelete_InputValidation(t *testing.T) { }, } - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + for _, useV2Tenancy := range []bool{false, true} { + t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(useV2Tenancy). + WithRegisterFns(demo.RegisterTypes). 
+ Run(t) - for desc, tc := range testCases { - t.Run(desc, func(t *testing.T) { - run(t, client, tc) + for desc, tc := range testCases { + t.Run(desc, func(t *testing.T) { + run(t, client, tc) + }) + } }) } } func TestDelete_TypeNotRegistered(t *testing.T) { - client := svctest.NewResourceServiceBuilder().Run(t) + for _, useV2Tenancy := range []bool{false, true} { + t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { + client := svctest.NewResourceServiceBuilder().WithV2Tenancy(useV2Tenancy).Run(t) - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) + artist, err := demo.GenerateV2Artist() + require.NoError(t, err) - // delete artist with unregistered type - _, err = client.Delete(context.Background(), &pbresource.DeleteRequest{Id: artist.Id, Version: ""}) - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.ErrorContains(t, err, "not registered") + // delete artist with unregistered type + _, err = client.Delete(context.Background(), &pbresource.DeleteRequest{Id: artist.Id, Version: ""}) + require.Error(t, err) + require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.ErrorContains(t, err, "not registered") + }) + } } func TestDelete_ACLs(t *testing.T) { @@ -263,10 +274,15 @@ func TestDelete_Success(t *testing.T) { t.Run(desc, func(t *testing.T) { for tenancyDesc, modFn := range tenancyCases() { t.Run(tenancyDesc, func(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - run(t, client, tc, modFn) + for _, useV2Tenancy := range []bool{false, true} { + t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(useV2Tenancy). + WithRegisterFns(demo.RegisterTypes). 
+ Run(t) + run(t, client, tc, modFn) + }) + } }) } }) @@ -322,41 +338,46 @@ func TestDelete_NonCAS_Retry(t *testing.T) { func TestDelete_TombstoneDeletionDoesNotCreateNewTombstone(t *testing.T) { t.Parallel() - ctx := context.Background() - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + for _, useV2Tenancy := range []bool{false, true} { + t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { + ctx := context.Background() + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(useV2Tenancy). + WithRegisterFns(demo.RegisterTypes). + Run(t) - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) + artist, err := demo.GenerateV2Artist() + require.NoError(t, err) - rsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: artist}) - require.NoError(t, err) - artist = rsp.Resource + rsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: artist}) + require.NoError(t, err) + artist = rsp.Resource - // delete artist - _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: artist.Id, Version: ""}) - require.NoError(t, err) + // delete artist + _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: artist.Id, Version: ""}) + require.NoError(t, err) - // verify artist's tombstone created - rsp2, err := client.Read(ctx, &pbresource.ReadRequest{ - Id: &pbresource.ID{ - Name: svc.TombstoneNameFor(artist.Id), - Type: resource.TypeV1Tombstone, - Tenancy: artist.Id.Tenancy, - }, - }) - require.NoError(t, err) - tombstone := rsp2.Resource + // verify artist's tombstone created + rsp2, err := client.Read(ctx, &pbresource.ReadRequest{ + Id: &pbresource.ID{ + Name: svc.TombstoneNameFor(artist.Id), + Type: resource.TypeV1Tombstone, + Tenancy: artist.Id.Tenancy, + }, + }) + require.NoError(t, err) + tombstone := rsp2.Resource - // delete artist's tombstone - _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: tombstone.Id, Version: tombstone.Version}) - require.NoError(t, 
err) + // delete artist's tombstone + _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: tombstone.Id, Version: tombstone.Version}) + require.NoError(t, err) - // verify no new tombstones created and artist's existing tombstone deleted - rsp3, err := client.List(ctx, &pbresource.ListRequest{Type: resource.TypeV1Tombstone, Tenancy: artist.Id.Tenancy}) - require.NoError(t, err) - require.Empty(t, rsp3.Resources) + // verify no new tombstones created and artist's existing tombstone deleted + rsp3, err := client.List(ctx, &pbresource.ListRequest{Type: resource.TypeV1Tombstone, Tenancy: artist.Id.Tenancy}) + require.NoError(t, err) + require.Empty(t, rsp3.Resources) + }) + } } func TestDelete_NotFound(t *testing.T) { @@ -371,13 +392,18 @@ func TestDelete_NotFound(t *testing.T) { require.NoError(t, err) } - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + for _, useV2Tenancy := range []bool{false, true} { + t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(useV2Tenancy). + WithRegisterFns(demo.RegisterTypes). + Run(t) - for desc, tc := range deleteTestCases() { - t.Run(desc, func(t *testing.T) { - run(t, client, tc) + for desc, tc := range deleteTestCases() { + t.Run(desc, func(t *testing.T) { + run(t, client, tc) + }) + } }) } } @@ -385,86 +411,115 @@ func TestDelete_NotFound(t *testing.T) { func TestDelete_VersionMismatch(t *testing.T) { t.Parallel() - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + for _, useV2Tenancy := range []bool{false, true} { + t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(useV2Tenancy). + WithRegisterFns(demo.RegisterTypes). 
+ Run(t) - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) - rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: artist}) - require.NoError(t, err) + artist, err := demo.GenerateV2Artist() + require.NoError(t, err) + rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: artist}) + require.NoError(t, err) - // delete with a version that is different from the stored version - _, err = client.Delete(context.Background(), &pbresource.DeleteRequest{Id: rsp.Resource.Id, Version: "non-existent-version"}) - require.Error(t, err) - require.Equal(t, codes.Aborted.String(), status.Code(err).String()) - require.ErrorContains(t, err, "CAS operation failed") + // delete with a version that is different from the stored version + _, err = client.Delete(context.Background(), &pbresource.DeleteRequest{Id: rsp.Resource.Id, Version: "non-existent-version"}) + require.Error(t, err) + require.Equal(t, codes.Aborted.String(), status.Code(err).String()) + require.ErrorContains(t, err, "CAS operation failed") + }) + } } func TestDelete_MarkedForDeletionWhenFinalizersPresent(t *testing.T) { - ctx := context.Background() - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - // Create a resource with a finalizer - res := rtest.Resource(demo.TypeV1Artist, "manwithnoname"). - WithTenancy(resource.DefaultClusteredTenancy()). - WithData(t, &pbdemo.Artist{Name: "Man With No Name"}). - WithMeta(resource.FinalizerKey, "finalizer1"). - Write(t, client) - - // Delete it - _, err := client.Delete(ctx, &pbresource.DeleteRequest{Id: res.Id}) - require.NoError(t, err) + for _, useV2Tenancy := range []bool{false, true} { + t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { + ctx := context.Background() + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(useV2Tenancy). + WithRegisterFns(demo.RegisterTypes). 
+ Run(t) + + // Create a resource with a finalizer + res := rtest.Resource(demo.TypeV1Artist, "manwithnoname"). + WithTenancy(resource.DefaultClusteredTenancy()). + WithData(t, &pbdemo.Artist{Name: "Man With No Name"}). + WithMeta(resource.FinalizerKey, "finalizer1"). + Write(t, client) + + // Delete it + _, err := client.Delete(ctx, &pbresource.DeleteRequest{Id: res.Id}) + require.NoError(t, err) - // Verify resource has been marked for deletion - rsp, err := client.Read(ctx, &pbresource.ReadRequest{Id: res.Id}) - require.NoError(t, err) - require.True(t, resource.IsMarkedForDeletion(rsp.Resource)) + // Verify resource has been marked for deletion + rsp, err := client.Read(ctx, &pbresource.ReadRequest{Id: res.Id}) + require.NoError(t, err) + require.True(t, resource.IsMarkedForDeletion(rsp.Resource)) - // Delete again - should be no-op - _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: res.Id}) - require.NoError(t, err) + // Delete again - should be no-op + _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: res.Id}) + require.NoError(t, err) - // Verify no-op by checking version still the same - rsp2, err := client.Read(ctx, &pbresource.ReadRequest{Id: res.Id}) - require.NoError(t, err) - rtest.RequireVersionUnchanged(t, rsp2.Resource, rsp.Resource.Version) + // Verify no-op by checking version still the same + rsp2, err := client.Read(ctx, &pbresource.ReadRequest{Id: res.Id}) + require.NoError(t, err) + rtest.RequireVersionUnchanged(t, rsp2.Resource, rsp.Resource.Version) + }) + } } func TestDelete_ImmediatelyDeletedAfterFinalizersRemoved(t *testing.T) { - ctx := context.Background() - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - // Create a resource with a finalizer - res := rtest.Resource(demo.TypeV1Artist, "manwithnoname"). - WithTenancy(resource.DefaultClusteredTenancy()). - WithData(t, &pbdemo.Artist{Name: "Man With No Name"}). - WithMeta(resource.FinalizerKey, "finalizer1"). 
- Write(t, client) - - // Delete should mark it for deletion - _, err := client.Delete(ctx, &pbresource.DeleteRequest{Id: res.Id}) - require.NoError(t, err) + for _, useV2Tenancy := range []bool{false, true} { + t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) { + ctx := context.Background() + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(useV2Tenancy). + WithRegisterFns(demo.RegisterTypes). + Run(t) + + // Create a resource with a finalizer + res := rtest.Resource(demo.TypeV1Artist, "manwithnoname"). + WithTenancy(resource.DefaultClusteredTenancy()). + WithData(t, &pbdemo.Artist{Name: "Man With No Name"}). + WithMeta(resource.FinalizerKey, "finalizer1"). + Write(t, client) + + // Delete should mark it for deletion + _, err := client.Delete(ctx, &pbresource.DeleteRequest{Id: res.Id}) + require.NoError(t, err) - // Remove the finalizer - rsp, err := client.Read(ctx, &pbresource.ReadRequest{Id: res.Id}) - require.NoError(t, err) - resource.RemoveFinalizer(rsp.Resource, "finalizer1") - _, err = client.Write(ctx, &pbresource.WriteRequest{Resource: rsp.Resource}) - require.NoError(t, err) + // Remove the finalizer + rsp, err := client.Read(ctx, &pbresource.ReadRequest{Id: res.Id}) + require.NoError(t, err) + resource.RemoveFinalizer(rsp.Resource, "finalizer1") + _, err = client.Write(ctx, &pbresource.WriteRequest{Resource: rsp.Resource}) + require.NoError(t, err) - // Delete should be immediate - _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: rsp.Resource.Id}) - require.NoError(t, err) + // Delete should be immediate + _, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: rsp.Resource.Id}) + require.NoError(t, err) + + // Verify deleted + _, err = client.Read(ctx, &pbresource.ReadRequest{Id: rsp.Resource.Id}) + require.Error(t, err) + require.Equal(t, codes.NotFound.String(), status.Code(err).String()) + }) + } +} + +func TestDelete_BlockDeleteDefaultNamespace(t *testing.T) { + client := 
svctest.NewResourceServiceBuilder().WithV2Tenancy(true).Run(t) - // Verify deleted - _, err = client.Read(ctx, &pbresource.ReadRequest{Id: rsp.Resource.Id}) + id := &pbresource.ID{ + Name: resource.DefaultNamespaceName, + Type: pbtenancy.NamespaceType, + Tenancy: &pbresource.Tenancy{Partition: resource.DefaultPartitionName}, + } + _, err := client.Delete(context.Background(), &pbresource.DeleteRequest{Id: id}) require.Error(t, err) - require.Equal(t, codes.NotFound.String(), status.Code(err).String()) + require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.ErrorContains(t, err, "cannot delete default namespace") } type deleteTestCase struct { diff --git a/agent/grpc-external/services/resource/list.go b/agent/grpc-external/services/resource/list.go index 2e51c443d4a73..62ec2d7975bb6 100644 --- a/agent/grpc-external/services/resource/list.go +++ b/agent/grpc-external/services/resource/list.go @@ -104,6 +104,10 @@ func (s *Server) ensureListRequestValid(req *pbresource.ListRequest) (*resource. // not enabled in the license. 
_ = s.FeatureCheck(reg) + if err = checkV2Tenancy(s.UseV2Tenancy, req.Type); err != nil { + return nil, err + } + if err := validateWildcardTenancy(req.Tenancy, req.NamePrefix); err != nil { return nil, err } diff --git a/agent/grpc-external/services/resource/list_by_owner.go b/agent/grpc-external/services/resource/list_by_owner.go index 29e14e407246c..bb1868a620385 100644 --- a/agent/grpc-external/services/resource/list_by_owner.go +++ b/agent/grpc-external/services/resource/list_by_owner.go @@ -100,6 +100,10 @@ func (s *Server) ensureListByOwnerRequestValid(req *pbresource.ListByOwnerReques return nil, err } + if err = checkV2Tenancy(s.UseV2Tenancy, req.Owner.Type); err != nil { + return nil, err + } + if err = validateScopedTenancy(reg.Scope, reg.Type, req.Owner.Tenancy, true); err != nil { return nil, err } diff --git a/agent/grpc-external/services/resource/list_by_owner_test.go b/agent/grpc-external/services/resource/list_by_owner_test.go index 23c537dcd6fdd..92167042ea154 100644 --- a/agent/grpc-external/services/resource/list_by_owner_test.go +++ b/agent/grpc-external/services/resource/list_by_owner_test.go @@ -27,6 +27,8 @@ import ( "github.com/hashicorp/consul/proto/private/prototest" ) +// TODO: Update all tests to use true/false table test for v2tenancy + func TestListByOwner_InputValidation(t *testing.T) { client := svctest.NewResourceServiceBuilder(). WithRegisterFns(demo.RegisterTypes). diff --git a/agent/grpc-external/services/resource/list_test.go b/agent/grpc-external/services/resource/list_test.go index 43d5def0c37be..efcfa3cafd8c4 100644 --- a/agent/grpc-external/services/resource/list_test.go +++ b/agent/grpc-external/services/resource/list_test.go @@ -27,6 +27,8 @@ import ( "github.com/hashicorp/consul/proto/private/prototest" ) +// TODO: Update all tests to use true/false table test for v2tenancy + func TestList_InputValidation(t *testing.T) { client := svctest.NewResourceServiceBuilder(). WithRegisterFns(demo.RegisterTypes). 
diff --git a/agent/grpc-external/services/resource/mutate_and_validate.go b/agent/grpc-external/services/resource/mutate_and_validate.go index c58fd4a095726..7aa3519f38485 100644 --- a/agent/grpc-external/services/resource/mutate_and_validate.go +++ b/agent/grpc-external/services/resource/mutate_and_validate.go @@ -127,6 +127,10 @@ func (s *Server) ensureResourceValid(res *pbresource.Resource, enforceLicenseChe return nil, err } + if err = checkV2Tenancy(s.UseV2Tenancy, res.Id.Type); err != nil { + return nil, err + } + // Check scope if reg.Scope == resource.ScopePartition && res.Id.Tenancy.Namespace != "" { return nil, status.Errorf( diff --git a/agent/grpc-external/services/resource/mutate_and_validate_test.go b/agent/grpc-external/services/resource/mutate_and_validate_test.go index 6644f108d4022..8f163e778c790 100644 --- a/agent/grpc-external/services/resource/mutate_and_validate_test.go +++ b/agent/grpc-external/services/resource/mutate_and_validate_test.go @@ -4,6 +4,7 @@ package resource_test import ( + "fmt" "testing" "github.com/stretchr/testify/require" @@ -33,13 +34,18 @@ func TestMutateAndValidate_InputValidation(t *testing.T) { require.ErrorContains(t, err, tc.errContains) } - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) - - for desc, tc := range resourceValidTestCases(t) { - t.Run(desc, func(t *testing.T) { - run(t, client, tc) + for _, v2tenancy := range []bool{false, true} { + t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithRegisterFns(demo.RegisterTypes). + WithV2Tenancy(v2tenancy). + Run(t) + + for desc, tc := range resourceValidTestCases(t) { + t.Run(desc, func(t *testing.T) { + run(t, client, tc) + }) + } }) } } @@ -60,27 +66,39 @@ func TestMutateAndValidate_OwnerValidation(t *testing.T) { require.ErrorContains(t, err, tc.errorContains) } - client := svctest.NewResourceServiceBuilder(). 
- WithRegisterFns(demo.RegisterTypes). - Run(t) - - for desc, tc := range ownerValidationTestCases(t) { - t.Run(desc, func(t *testing.T) { - run(t, client, tc) + for _, v2tenancy := range []bool{false, true} { + t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithRegisterFns(demo.RegisterTypes). + WithV2Tenancy(v2tenancy). + Run(t) + + for desc, tc := range ownerValidationTestCases(t) { + t.Run(desc, func(t *testing.T) { + run(t, client, tc) + }) + } }) } } func TestMutateAndValidate_TypeNotFound(t *testing.T) { - client := svctest.NewResourceServiceBuilder().Run(t) + run := func(t *testing.T, client pbresource.ResourceServiceClient) { + res, err := demo.GenerateV2Artist() + require.NoError(t, err) - res, err := demo.GenerateV2Artist() - require.NoError(t, err) + _, err = client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: res}) + require.Error(t, err) + require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered") + } - _, err = client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: res}) - require.Error(t, err) - require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) - require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered") + for _, v2tenancy := range []bool{false, true} { + t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) { + client := svctest.NewResourceServiceBuilder().WithV2Tenancy(v2tenancy).Run(t) + run(t, client) + }) + } } func TestMutateAndValidate_Success(t *testing.T) { @@ -96,40 +114,72 @@ func TestMutateAndValidate_Success(t *testing.T) { prototest.AssertDeepEqual(t, tc.expectedTenancy, rsp.Resource.Id.Tenancy) } - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). 
- Run(t) - - for desc, tc := range mavOrWriteSuccessTestCases(t) { - t.Run(desc, func(t *testing.T) { - run(t, client, tc) + for _, v2tenancy := range []bool{false, true} { + t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithRegisterFns(demo.RegisterTypes). + WithV2Tenancy(v2tenancy). + Run(t) + + for desc, tc := range mavOrWriteSuccessTestCases(t) { + t.Run(desc, func(t *testing.T) { + run(t, client, tc) + }) + } }) } } func TestMutateAndValidate_Mutate(t *testing.T) { - client := svctest.NewResourceServiceBuilder(). - WithRegisterFns(demo.RegisterTypes). - Run(t) + for _, v2tenancy := range []bool{false, true} { + t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithRegisterFns(demo.RegisterTypes). + WithV2Tenancy(v2tenancy). + Run(t) + + artist, err := demo.GenerateV2Artist() + require.NoError(t, err) + + artistData := &pbdemov2.Artist{} + artist.Data.UnmarshalTo(artistData) + require.NoError(t, err) - artist, err := demo.GenerateV2Artist() - require.NoError(t, err) + // mutate hook sets genre to disco when unspecified + artistData.Genre = pbdemov2.Genre_GENRE_UNSPECIFIED + artist.Data.MarshalFrom(artistData) + require.NoError(t, err) - artistData := &pbdemov2.Artist{} - artist.Data.UnmarshalTo(artistData) - require.NoError(t, err) + rsp, err := client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: artist}) + require.NoError(t, err) - // mutate hook sets genre to disco when unspecified - artistData.Genre = pbdemov2.Genre_GENRE_UNSPECIFIED - artist.Data.MarshalFrom(artistData) - require.NoError(t, err) + // verify mutate hook set genre to disco + require.NoError(t, rsp.Resource.Data.UnmarshalTo(artistData)) + require.Equal(t, pbdemov2.Genre_GENRE_DISCO, artistData.Genre) + }) + } +} - rsp, err := client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: artist}) 
- require.NoError(t, err) +func TestMutateAndValidate_Tenancy_NotFound(t *testing.T) { + for desc, tc := range mavOrWriteTenancyNotFoundTestCases(t) { + t.Run(desc, func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(true). + WithRegisterFns(demo.RegisterTypes). + Run(t) + + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") + require.NoError(t, err) - // verify mutate hook set genre to disco - require.NoError(t, rsp.Resource.Data.UnmarshalTo(artistData)) - require.Equal(t, pbdemov2.Genre_GENRE_DISCO, artistData.Genre) + artist, err := demo.GenerateV2Artist() + require.NoError(t, err) + + _, err = client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: tc.modFn(artist, recordLabel)}) + require.Error(t, err) + require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.Contains(t, err.Error(), tc.errContains) + }) + } } func TestMutateAndValidate_TenancyMarkedForDeletion_Fails(t *testing.T) { diff --git a/agent/grpc-external/services/resource/read.go b/agent/grpc-external/services/resource/read.go index bf69e2549a35e..48d07a337569a 100644 --- a/agent/grpc-external/services/resource/read.go +++ b/agent/grpc-external/services/resource/read.go @@ -106,6 +106,10 @@ func (s *Server) ensureReadRequestValid(req *pbresource.ReadRequest) (*resource. // not enabled in the license. 
_ = s.FeatureCheck(reg) + if err = checkV2Tenancy(s.UseV2Tenancy, req.Id.Type); err != nil { + return nil, err + } + // Check scope if err = validateScopedTenancy(reg.Scope, req.Id.Type, req.Id.Tenancy, false); err != nil { return nil, err diff --git a/agent/grpc-external/services/resource/read_test.go b/agent/grpc-external/services/resource/read_test.go index fbea0137af68f..b7367e6390319 100644 --- a/agent/grpc-external/services/resource/read_test.go +++ b/agent/grpc-external/services/resource/read_test.go @@ -30,6 +30,8 @@ import ( "github.com/hashicorp/consul/sdk/testutil" ) +// TODO: Update all tests to use true/false table test for v2tenancy + func TestRead_InputValidation(t *testing.T) { client := svctest.NewResourceServiceBuilder(). WithRegisterFns(demo.RegisterTypes). @@ -160,6 +162,74 @@ func TestRead_TypeNotFound(t *testing.T) { require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered") } +func TestRead_ResourceNotFound(t *testing.T) { + for desc, tc := range readTestCases() { + t.Run(desc, func(t *testing.T) { + type tenancyCase struct { + modFn func(artistId, recordlabelId *pbresource.ID) *pbresource.ID + errContains string + } + tenancyCases := map[string]tenancyCase{ + "resource not found by name": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + artistId.Name = "bogusname" + return artistId + }, + errContains: "resource not found", + }, + "partition not found when namespace scoped": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + id := clone(artistId) + id.Tenancy.Partition = "boguspartition" + return id + }, + errContains: "partition not found", + }, + "namespace not found when namespace scoped": { + modFn: func(artistId, _ *pbresource.ID) *pbresource.ID { + id := clone(artistId) + id.Tenancy.Namespace = "bogusnamespace" + return id + }, + errContains: "namespace not found", + }, + "partition not found when partition scoped": { + modFn: func(_, recordLabelId *pbresource.ID) *pbresource.ID { + id := 
clone(recordLabelId) + id.Tenancy.Partition = "boguspartition" + return id + }, + errContains: "partition not found", + }, + } + for tenancyDesc, tenancyCase := range tenancyCases { + t.Run(tenancyDesc, func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(true). + WithRegisterFns(demo.RegisterTypes). + Run(t) + + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") + require.NoError(t, err) + _, err = client.Write(context.Background(), &pbresource.WriteRequest{Resource: recordLabel}) + require.NoError(t, err) + + artist, err := demo.GenerateV2Artist() + require.NoError(t, err) + _, err = client.Write(context.Background(), &pbresource.WriteRequest{Resource: artist}) + require.NoError(t, err) + + // Each tenancy test case picks which resource to use based on the resource type's scope. + _, err = client.Read(tc.ctx, &pbresource.ReadRequest{Id: tenancyCase.modFn(artist.Id, recordLabel.Id)}) + require.Error(t, err) + require.Equal(t, codes.NotFound.String(), status.Code(err).String()) + require.ErrorContains(t, err, tenancyCase.errContains) + }) + } + }) + } +} + func TestRead_GroupVersionMismatch(t *testing.T) { for desc, tc := range readTestCases() { t.Run(desc, func(t *testing.T) { diff --git a/agent/grpc-external/services/resource/server_ce.go b/agent/grpc-external/services/resource/server_ce.go index 88f6e60add28c..6b2551b06b9e4 100644 --- a/agent/grpc-external/services/resource/server_ce.go +++ b/agent/grpc-external/services/resource/server_ce.go @@ -6,11 +6,15 @@ package resource import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/proto-public/pbresource" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" ) func v2TenancyToV1EntMeta(tenancy *pbresource.Tenancy) *acl.EnterpriseMeta { @@ -27,6 +31,15 @@ func v1EntMetaToV2Tenancy(reg 
*resource.Registration, entMeta *acl.EnterpriseMet } } +// checkV2Tenancy returns FailedPrecondition error for namespace resource type +// when the "v2tenancy" feature flag is not enabled. +func checkV2Tenancy(useV2Tenancy bool, rtype *pbresource.Type) error { + if resource.EqualType(rtype, pbtenancy.NamespaceType) && !useV2Tenancy { + return status.Errorf(codes.FailedPrecondition, "use of the v2 namespace resource requires the \"v2tenancy\" feature flag") + } + return nil +} + type Config struct { Logger hclog.Logger Registry Registry @@ -37,6 +50,11 @@ type Config struct { // TenancyBridge temporarily allows us to use V1 implementations of // partitions and namespaces until V2 implementations are available. TenancyBridge TenancyBridge + + // UseV2Tenancy is true if the "v2tenancy" experiment is active, false otherwise. + // Attempts to create v2 tenancy resources (partition or namespace) will fail when the + // flag is false. + UseV2Tenancy bool } // FeatureCheck does not apply to the community edition. diff --git a/agent/grpc-external/services/resource/testing/builder.go b/agent/grpc-external/services/resource/testing/builder.go index 8c42096746835..ea61928ce32e6 100644 --- a/agent/grpc-external/services/resource/testing/builder.go +++ b/agent/grpc-external/services/resource/testing/builder.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/consul/agent/grpc-external/testutils" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/storage/inmem" + "github.com/hashicorp/consul/internal/tenancy" "github.com/hashicorp/consul/proto-public/pbresource" "github.com/hashicorp/consul/sdk/testutil" ) @@ -25,14 +26,25 @@ import ( // making requests. func NewResourceServiceBuilder() *Builder { b := &Builder{ - registry: resource.NewRegistry(), - // Always make sure the builtin tenancy exists. 
+ useV2Tenancy: false, + registry: resource.NewRegistry(), + // Regardless of whether using mock of v2tenancy, always make sure + // the builtin tenancy exists. tenancies: []*pbresource.Tenancy{resource.DefaultNamespacedTenancy()}, cloning: true, } return b } +// WithV2Tenancy configures which tenancy bridge is used. +// +// true => real v2 default partition and namespace via v2 tenancy bridge +// false => mock default partition and namespace since v1 tenancy bridge can't be used (not spinning up an entire server here) +func (b *Builder) WithV2Tenancy(useV2Tenancy bool) *Builder { + b.useV2Tenancy = useV2Tenancy + return b +} + // Registry provides access to the constructed registry post-Run() when // needed by other test dependencies. func (b *Builder) Registry() resource.Registry { @@ -94,22 +106,33 @@ func (b *Builder) Run(t testutil.TestingTB) pbresource.ResourceServiceClient { t.Cleanup(cancel) go backend.Run(ctx) + // Automatically add tenancy types if v2 tenancy enabled + if b.useV2Tenancy { + b.registerFns = append(b.registerFns, tenancy.RegisterTypes) + } + for _, registerFn := range b.registerFns { registerFn(b.registry) } - // use mock tenancy bridge. default/default has already been added out of the box - mockTenancyBridge := &svc.MockTenancyBridge{} - - for _, tenancy := range b.tenancies { - mockTenancyBridge.On("PartitionExists", tenancy.Partition).Return(true, nil) - mockTenancyBridge.On("NamespaceExists", tenancy.Partition, tenancy.Namespace).Return(true, nil) - mockTenancyBridge.On("IsPartitionMarkedForDeletion", tenancy.Partition).Return(false, nil) - mockTenancyBridge.On("IsNamespaceMarkedForDeletion", tenancy.Partition, tenancy.Namespace).Return(false, nil) + var tenancyBridge resource.TenancyBridge + if !b.useV2Tenancy { + // use mock tenancy bridge. 
default/default has already been added out of the box + mockTenancyBridge := &svc.MockTenancyBridge{} + + for _, tenancy := range b.tenancies { + mockTenancyBridge.On("PartitionExists", tenancy.Partition).Return(true, nil) + mockTenancyBridge.On("NamespaceExists", tenancy.Partition, tenancy.Namespace).Return(true, nil) + mockTenancyBridge.On("IsPartitionMarkedForDeletion", tenancy.Partition).Return(false, nil) + mockTenancyBridge.On("IsNamespaceMarkedForDeletion", tenancy.Partition, tenancy.Namespace).Return(false, nil) + } + + tenancyBridge = mockTenancyBridge + } else { + // use v2 tenancy bridge. population comes later after client injected. + tenancyBridge = tenancy.NewV2TenancyBridge() } - tenancyBridge := mockTenancyBridge - if b.aclResolver == nil { // When not provided (regardless of V1 tenancy or V2 tenancy), configure an ACL resolver // that has ACLs disabled and fills in "default" for the partition and namespace when @@ -149,5 +172,22 @@ func (b *Builder) Run(t testutil.TestingTB) pbresource.ResourceServiceClient { client = pbresource.NewCloningResourceServiceClient(client) } + // HACK ALERT: The client needs to be injected into the V2TenancyBridge + // after it has been created due the circular dependency. This will + // go away when the tenancy bridge is removed and V1 is no more, however + // long that takes. 
+ switch config.TenancyBridge.(type) { + case *tenancy.V2TenancyBridge: + config.TenancyBridge.(*tenancy.V2TenancyBridge).WithClient(client) + // Default partition and namespace can finally be created + require.NoError(t, initTenancy(ctx, backend)) + + for _, tenancy := range b.tenancies { + if tenancy.Partition == resource.DefaultPartitionName && tenancy.Namespace == resource.DefaultNamespaceName { + continue + } + t.Fatalf("TODO: implement creation of passed in v2 tenancy: %v", tenancy) + } + } return client } diff --git a/agent/grpc-external/services/resource/testing/builder_ce.go b/agent/grpc-external/services/resource/testing/builder_ce.go index 90954e4bfb46d..d7f9a7c733025 100644 --- a/agent/grpc-external/services/resource/testing/builder_ce.go +++ b/agent/grpc-external/services/resource/testing/builder_ce.go @@ -14,12 +14,13 @@ import ( ) type Builder struct { - registry resource.Registry - registerFns []func(resource.Registry) - tenancies []*pbresource.Tenancy - aclResolver svc.ACLResolver - serviceImpl *svc.Server - cloning bool + registry resource.Registry + registerFns []func(resource.Registry) + useV2Tenancy bool + tenancies []*pbresource.Tenancy + aclResolver svc.ACLResolver + serviceImpl *svc.Server + cloning bool } func (b *Builder) ensureLicenseManager() { @@ -32,5 +33,6 @@ func (b *Builder) newConfig(logger hclog.Logger, backend svc.Backend, tenancyBri Backend: backend, ACLResolver: b.aclResolver, TenancyBridge: tenancyBridge, + UseV2Tenancy: b.useV2Tenancy, } } diff --git a/agent/grpc-external/services/resource/testing/testing_ce.go b/agent/grpc-external/services/resource/testing/testing_ce.go index 023fa5189cccc..926acf6d38f74 100644 --- a/agent/grpc-external/services/resource/testing/testing_ce.go +++ b/agent/grpc-external/services/resource/testing/testing_ce.go @@ -6,7 +6,19 @@ package testing import ( + "context" + "errors" + "time" + + "github.com/oklog/ulid/v2" + "google.golang.org/protobuf/types/known/anypb" + 
"github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/storage" + "github.com/hashicorp/consul/internal/storage/inmem" + "github.com/hashicorp/consul/proto-public/pbresource" + pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1" ) func FillEntMeta(entMeta *acl.EnterpriseMeta) { @@ -16,3 +28,36 @@ func FillEntMeta(entMeta *acl.EnterpriseMeta) { func FillAuthorizerContext(authzContext *acl.AuthorizerContext) { // nothing to to in CE. } + +// initTenancy creates the builtin v2 namespace resource only. The builtin +// v2 partition is not created because we're in CE. +func initTenancy(ctx context.Context, b *inmem.Backend) error { + nsData, err := anypb.New(&pbtenancy.Namespace{Description: "default namespace in default partition"}) + if err != nil { + return err + } + nsID := &pbresource.ID{ + Type: pbtenancy.NamespaceType, + Name: resource.DefaultNamespaceName, + Tenancy: resource.DefaultPartitionedTenancy(), + Uid: ulid.Make().String(), + } + read, err := b.Read(ctx, storage.StrongConsistency, nsID) + if err != nil && !errors.Is(err, storage.ErrNotFound) { + return err + } + if read == nil && errors.Is(err, storage.ErrNotFound) { + _, err = b.WriteCAS(ctx, &pbresource.Resource{ + Id: nsID, + Generation: ulid.Make().String(), + Data: nsData, + Metadata: map[string]string{ + "generated_at": time.Now().Format(time.RFC3339), + }, + }) + if err != nil { + return err + } + } + return nil +} diff --git a/agent/grpc-external/services/resource/watch.go b/agent/grpc-external/services/resource/watch.go index 246ae4e296cf2..511802f2cc206 100644 --- a/agent/grpc-external/services/resource/watch.go +++ b/agent/grpc-external/services/resource/watch.go @@ -130,6 +130,10 @@ func (s *Server) ensureWatchListRequestValid(req *pbresource.WatchListRequest) ( req.Tenancy = wildcardTenancyFor(reg.Scope) } + if err = checkV2Tenancy(s.UseV2Tenancy, req.Type); err != nil { + return nil, err + } + if err := 
validateWildcardTenancy(req.Tenancy, req.NamePrefix); err != nil { return nil, err } diff --git a/agent/grpc-external/services/resource/watch_test.go b/agent/grpc-external/services/resource/watch_test.go index 73f164e1a999e..5ccdb609babad 100644 --- a/agent/grpc-external/services/resource/watch_test.go +++ b/agent/grpc-external/services/resource/watch_test.go @@ -27,6 +27,8 @@ import ( "github.com/hashicorp/consul/proto/private/prototest" ) +// TODO: Update all tests to use true/false table test for v2tenancy + func TestWatchList_InputValidation(t *testing.T) { client := svctest.NewResourceServiceBuilder(). WithRegisterFns(demo.RegisterTypes). diff --git a/agent/grpc-external/services/resource/write_status_test.go b/agent/grpc-external/services/resource/write_status_test.go index 4c524430251bd..57431eac54e3b 100644 --- a/agent/grpc-external/services/resource/write_status_test.go +++ b/agent/grpc-external/services/resource/write_status_test.go @@ -23,6 +23,8 @@ import ( "github.com/hashicorp/consul/proto-public/pbresource" ) +// TODO: Update all tests to use true/false table test for v2tenancy + func TestWriteStatus_ACL(t *testing.T) { type testCase struct { authz resolver.Result @@ -369,6 +371,66 @@ func TestWriteStatus_Tenancy_Defaults(t *testing.T) { } } +func TestWriteStatus_Tenancy_NotFound(t *testing.T) { + for desc, tc := range map[string]struct { + scope resource.Scope + modFn func(req *pbresource.WriteStatusRequest) + errCode codes.Code + errContains string + }{ + "namespaced resource provides nonexistant partition": { + scope: resource.ScopeNamespace, + modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Partition = "bad" }, + errCode: codes.InvalidArgument, + errContains: "partition", + }, + "namespaced resource provides nonexistant namespace": { + scope: resource.ScopeNamespace, + modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Namespace = "bad" }, + errCode: codes.InvalidArgument, + errContains: "namespace", + }, + 
"partitioned resource provides nonexistant partition": { + scope: resource.ScopePartition, + modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Partition = "bad" }, + errCode: codes.InvalidArgument, + errContains: "partition", + }, + } { + t.Run(desc, func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(true). + WithRegisterFns(demo.RegisterTypes). + Run(t) + + // Pick resource based on scope of type in testcase. + var res *pbresource.Resource + var err error + switch tc.scope { + case resource.ScopeNamespace: + res, err = demo.GenerateV2Artist() + case resource.ScopePartition: + res, err = demo.GenerateV1RecordLabel("looney-tunes") + } + require.NoError(t, err) + + // Fill in required fields so validation continues until tenancy is checked + req := validWriteStatusRequest(t, res) + req.Id.Uid = ulid.Make().String() + req.Status.ObservedGeneration = ulid.Make().String() + + // Write status with tenancy modded by testcase. + tc.modFn(req) + _, err = client.WriteStatus(testContext(t), req) + + // Verify non-existant tenancy field is the cause of the error. + require.Error(t, err) + require.Equal(t, tc.errCode.String(), status.Code(err).String()) + require.Contains(t, err.Error(), tc.errContains) + }) + } +} + func TestWriteStatus_CASFailure(t *testing.T) { client := svctest.NewResourceServiceBuilder(). WithRegisterFns(demo.RegisterTypes). 
diff --git a/agent/grpc-external/services/resource/write_test.go b/agent/grpc-external/services/resource/write_test.go index c14aaad0e1ebd..beb47b6f22e4e 100644 --- a/agent/grpc-external/services/resource/write_test.go +++ b/agent/grpc-external/services/resource/write_test.go @@ -4,13 +4,16 @@ package resource_test import ( + "context" "testing" + "time" "github.com/oklog/ulid/v2" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" "github.com/hashicorp/consul/acl/resolver" svc "github.com/hashicorp/consul/agent/grpc-external/services/resource" @@ -19,11 +22,14 @@ import ( "github.com/hashicorp/consul/internal/resource/demo" rtest "github.com/hashicorp/consul/internal/resource/resourcetest" "github.com/hashicorp/consul/proto-public/pbresource" + pbdemo "github.com/hashicorp/consul/proto/private/pbdemo/v1" pbdemov1 "github.com/hashicorp/consul/proto/private/pbdemo/v1" pbdemov2 "github.com/hashicorp/consul/proto/private/pbdemo/v2" "github.com/hashicorp/consul/proto/private/prototest" ) +// TODO: Update all tests to use true/false table test for v2tenancy + func TestWrite_InputValidation(t *testing.T) { client := svctest.NewResourceServiceBuilder(). WithRegisterFns(demo.RegisterTypes). @@ -180,6 +186,46 @@ func TestWrite_Create_Success(t *testing.T) { } } +func TestWrite_Create_Tenancy_NotFound(t *testing.T) { + for desc, tc := range mavOrWriteTenancyNotFoundTestCases(t) { + t.Run(desc, func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(true). + WithRegisterFns(demo.RegisterTypes). 
+ Run(t) + + recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes") + require.NoError(t, err) + + artist, err := demo.GenerateV2Artist() + require.NoError(t, err) + + _, err = client.Write(testContext(t), &pbresource.WriteRequest{Resource: tc.modFn(artist, recordLabel)}) + require.Error(t, err) + require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.Contains(t, err.Error(), tc.errContains) + }) + } +} + +func TestWrite_Create_With_DeletionTimestamp_Fails(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(true). + WithRegisterFns(demo.RegisterTypes). + Run(t) + + res := rtest.Resource(demo.TypeV1Artist, "blur"). + WithTenancy(resource.DefaultNamespacedTenancy()). + WithData(t, &pbdemov1.Artist{Name: "Blur"}). + WithMeta(resource.DeletionTimestampKey, time.Now().Format(time.RFC3339)). + Build() + + _, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: res}) + require.Error(t, err) + require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.Contains(t, err.Error(), resource.DeletionTimestampKey) +} + func TestWrite_Create_With_TenancyMarkedForDeletion_Fails(t *testing.T) { for desc, tc := range mavOrWriteTenancyMarkedForDeletionTestCases(t) { t.Run(desc, func(t *testing.T) { @@ -644,3 +690,239 @@ func TestEnsureFinalizerRemoved(t *testing.T) { }) } } + +func TestWrite_ResourceFrozenAfterMarkedForDeletion(t *testing.T) { + type testCase struct { + modFn func(res *pbresource.Resource) + errContains string + } + testCases := map[string]testCase{ + "no-op write rejected": { + modFn: func(res *pbresource.Resource) {}, + errContains: "cannot no-op write resource marked for deletion", + }, + "remove one finalizer": { + modFn: func(res *pbresource.Resource) { + resource.RemoveFinalizer(res, "finalizer1") + }, + }, + "remove all finalizers": { + modFn: func(res *pbresource.Resource) { + resource.RemoveFinalizer(res, "finalizer1") + resource.RemoveFinalizer(res, 
"finalizer2") + }, + }, + "adding finalizer fails": { + modFn: func(res *pbresource.Resource) { + resource.AddFinalizer(res, "finalizer3") + }, + errContains: "expected at least one finalizer to be removed", + }, + "remove deletionTimestamp fails": { + modFn: func(res *pbresource.Resource) { + delete(res.Metadata, resource.DeletionTimestampKey) + }, + errContains: "cannot remove deletionTimestamp", + }, + "modify deletionTimestamp fails": { + modFn: func(res *pbresource.Resource) { + res.Metadata[resource.DeletionTimestampKey] = "bad" + }, + errContains: "cannot modify deletionTimestamp", + }, + "modify data fails": { + modFn: func(res *pbresource.Resource) { + var err error + res.Data, err = anypb.New(&pbdemo.Artist{Name: "New Order"}) + require.NoError(t, err) + }, + errContains: "cannot modify data", + }, + } + + for desc, tc := range testCases { + t.Run(desc, func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(true). + WithRegisterFns(demo.RegisterTypes). + Run(t) + + // Create a resource with finalizers + res := rtest.Resource(demo.TypeV1Artist, "joydivision"). + WithTenancy(resource.DefaultNamespacedTenancy()). + WithData(t, &pbdemo.Artist{Name: "Joy Division"}). + WithMeta(resource.FinalizerKey, "finalizer1 finalizer2"). 
+ Write(t, client) + + // Mark for deletion - resource should now be frozen + _, err := client.Delete(context.Background(), &pbresource.DeleteRequest{Id: res.Id}) + require.NoError(t, err) + + // Verify marked for deletion + rsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: res.Id}) + require.NoError(t, err) + require.True(t, resource.IsMarkedForDeletion(rsp.Resource)) + + // Apply test case mods + tc.modFn(rsp.Resource) + + // Verify write results + _, err = client.Write(context.Background(), &pbresource.WriteRequest{Resource: rsp.Resource}) + if tc.errContains == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String()) + require.ErrorContains(t, err, tc.errContains) + } + }) + } +} + +func TestWrite_NonCASWritePreservesFinalizers(t *testing.T) { + type testCase struct { + existingMeta map[string]string + inputMeta map[string]string + expectedMeta map[string]string + } + testCases := map[string]testCase{ + "input nil metadata preserves existing finalizers": { + inputMeta: nil, + existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, + expectedMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, + }, + "input metadata and no finalizer key preserves existing finalizers": { + inputMeta: map[string]string{}, + existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, + expectedMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, + }, + "input metadata and with empty finalizer key overwrites existing finalizers": { + inputMeta: map[string]string{resource.FinalizerKey: ""}, + existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, + expectedMeta: map[string]string{resource.FinalizerKey: ""}, + }, + "input metadata with one finalizer key overwrites multiple existing finalizers": { + inputMeta: map[string]string{resource.FinalizerKey: "finalizer2"}, + 
existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, + expectedMeta: map[string]string{resource.FinalizerKey: "finalizer2"}, + }, + } + + for desc, tc := range testCases { + t.Run(desc, func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(true). + WithRegisterFns(demo.RegisterTypes). + Run(t) + + // Create the resource based on tc.existingMetadata + builder := rtest.Resource(demo.TypeV1Artist, "joydivision"). + WithTenancy(resource.DefaultNamespacedTenancy()). + WithData(t, &pbdemo.Artist{Name: "Joy"}) + + if tc.existingMeta != nil { + for k, v := range tc.existingMeta { + builder.WithMeta(k, v) + } + } + res := builder.Write(t, client) + + // Build resource for user write based on tc.inputMetadata + builder = rtest.Resource(demo.TypeV1Artist, res.Id.Name). + WithTenancy(resource.DefaultNamespacedTenancy()). + WithData(t, &pbdemo.Artist{Name: "Joy Division"}) + + if tc.inputMeta != nil { + for k, v := range tc.inputMeta { + builder.WithMeta(k, v) + } + } + userRes := builder.Build() + + // Perform the user write + rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: userRes}) + require.NoError(t, err) + + // Verify write result preserved metadata based on testcase.expecteMetadata + for k := range tc.expectedMeta { + require.Equal(t, tc.expectedMeta[k], rsp.Resource.Metadata[k]) + } + require.Equal(t, len(tc.expectedMeta), len(rsp.Resource.Metadata)) + }) + } +} + +func TestWrite_NonCASWritePreservesDeletionTimestamp(t *testing.T) { + type testCase struct { + existingMeta map[string]string + inputMeta map[string]string + expectedMeta map[string]string + } + + // deletionTimestamp has to be generated via Delete() call and can't be embedded in testdata + // even though testcase desc refers to it. 
+ testCases := map[string]testCase{ + "input metadata no deletion timestamp preserves existing deletion timestamp and removes single finalizer": { + inputMeta: map[string]string{resource.FinalizerKey: "finalizer1"}, + existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, + expectedMeta: map[string]string{resource.FinalizerKey: "finalizer1"}, + }, + "input metadata no deletion timestamp preserves existing deletion timestamp and removes all finalizers": { + inputMeta: map[string]string{resource.FinalizerKey: ""}, + existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"}, + expectedMeta: map[string]string{resource.FinalizerKey: ""}, + }, + } + + for desc, tc := range testCases { + t.Run(desc, func(t *testing.T) { + client := svctest.NewResourceServiceBuilder(). + WithV2Tenancy(true). + WithRegisterFns(demo.RegisterTypes). + Run(t) + + // Create the resource based on tc.existingMetadata + builder := rtest.Resource(demo.TypeV1Artist, "joydivision"). + WithTenancy(resource.DefaultNamespacedTenancy()). + WithData(t, &pbdemo.Artist{Name: "Joy Division"}) + + if tc.existingMeta != nil { + for k, v := range tc.existingMeta { + builder.WithMeta(k, v) + } + } + res := builder.Write(t, client) + + // Mark for deletion + _, err := client.Delete(context.Background(), &pbresource.DeleteRequest{Id: res.Id}) + require.NoError(t, err) + + // Re-read the deleted res for future comparison of deletionTimestamp + delRsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: res.Id}) + require.NoError(t, err) + + // Build resource for user write based on tc.inputMetadata + builder = rtest.Resource(demo.TypeV1Artist, res.Id.Name). + WithTenancy(resource.DefaultNamespacedTenancy()). 
+ WithData(t, &pbdemo.Artist{Name: "Joy Division"}) + + if tc.inputMeta != nil { + for k, v := range tc.inputMeta { + builder.WithMeta(k, v) + } + } + userRes := builder.Build() + + // Perform the non-CAS user write + rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: userRes}) + require.NoError(t, err) + + // Verify write result preserved metadata based on testcase.expectedMetadata + for k := range tc.expectedMeta { + require.Equal(t, tc.expectedMeta[k], rsp.Resource.Metadata[k]) + } + // Verify deletion timestamp preserved even though it wasn't passed in to the write + require.Equal(t, delRsp.Resource.Metadata[resource.DeletionTimestampKey], rsp.Resource.Metadata[resource.DeletionTimestampKey]) + }) + } +} diff --git a/agent/health_endpoint_test.go b/agent/health_endpoint_test.go index 0768cbb223295..98f4eaa71476f 100644 --- a/agent/health_endpoint_test.go +++ b/agent/health_endpoint_test.go @@ -15,11 +15,10 @@ import ( "time" "github.com/armon/go-metrics" + "github.com/hashicorp/serf/coordinate" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/hashicorp/serf/coordinate" - "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" @@ -28,6 +27,25 @@ import ( "github.com/hashicorp/consul/types" ) +func TestHealthEndpointsFailInV2(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t, `experiments = ["resource-apis"]`) + + checkRequest := func(method, url string) { + t.Run(method+" "+url, func(t *testing.T) { + assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, "{}") + }) + } + + checkRequest("GET", "/v1/health/node/web") + checkRequest("GET", "/v1/health/checks/web") + checkRequest("GET", "/v1/health/state/web") + checkRequest("GET", "/v1/health/service/web") + checkRequest("GET", "/v1/health/connect/web") + checkRequest("GET", "/v1/health/ingress/web") +} + func TestHealthChecksInState(t *testing.T) { if testing.Short() { 
t.Skip("too slow for testing.Short") diff --git a/agent/http.go b/agent/http.go index 506377074a6ec..65995ebd742f1 100644 --- a/agent/http.go +++ b/agent/http.go @@ -23,13 +23,12 @@ import ( "github.com/NYTimes/gziphandler" "github.com/armon/go-metrics" "github.com/armon/go-metrics/prometheus" + "github.com/hashicorp/go-cleanhttp" "github.com/mitchellh/mapstructure" "github.com/pkg/errors" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/config" @@ -425,6 +424,11 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc } logURL = aclEndpointRE.ReplaceAllString(logURL, "$1$4") + rejectCatalogV1Endpoint := false + if s.agent.baseDeps.UseV2Resources() { + rejectCatalogV1Endpoint = isV1CatalogRequest(req.URL.Path) + } + if s.denylist.Block(req.URL.Path) { errMsg := "Endpoint is blocked by agent configuration" httpLogger.Error("Request error", @@ -488,6 +492,14 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc return strings.Contains(err.Error(), rate.ErrRetryLater.Error()) } + isUsingV2CatalogExperiment := func(err error) bool { + if err == nil { + return false + } + + return structs.IsErrUsingV2CatalogExperiment(err) + } + isMethodNotAllowed := func(err error) bool { _, ok := err.(MethodNotAllowedError) return ok @@ -523,6 +535,10 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc msg = s.Message() } + if isUsingV2CatalogExperiment(err) && !isHTTPError(err) { + err = newRejectV1RequestWhenV2EnabledError() + } + switch { case isForbidden(err): resp.WriteHeader(http.StatusForbidden) @@ -599,7 +615,12 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc if err == nil { // Invoke the handler - obj, err = handler(resp, req) + if rejectCatalogV1Endpoint { + obj = nil + err = 
s.rejectV1RequestWhenV2Enabled() + } else { + obj, err = handler(resp, req) + } } } contentType := "application/json" @@ -645,6 +666,46 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc } } +func isV1CatalogRequest(logURL string) bool { + switch { + case strings.HasPrefix(logURL, "/v1/catalog/"), + strings.HasPrefix(logURL, "/v1/health/"), + strings.HasPrefix(logURL, "/v1/config/"): + return true + + case strings.HasPrefix(logURL, "/v1/agent/token/"), + logURL == "/v1/agent/self", + logURL == "/v1/agent/host", + logURL == "/v1/agent/version", + logURL == "/v1/agent/reload", + logURL == "/v1/agent/monitor", + logURL == "/v1/agent/metrics", + logURL == "/v1/agent/metrics/stream", + logURL == "/v1/agent/members", + strings.HasPrefix(logURL, "/v1/agent/join/"), + logURL == "/v1/agent/leave", + strings.HasPrefix(logURL, "/v1/agent/force-leave/"), + logURL == "/v1/agent/connect/authorize", + logURL == "/v1/agent/connect/ca/roots", + strings.HasPrefix(logURL, "/v1/agent/connect/ca/leaf/"): + return false + + case strings.HasPrefix(logURL, "/v1/agent/"): + return true + + case logURL == "/v1/internal/acl/authorize", + logURL == "/v1/internal/service-virtual-ip", + logURL == "/v1/internal/ui/oidc-auth-methods", + strings.HasPrefix(logURL, "/v1/internal/ui/metrics-proxy/"): + return false + + case strings.HasPrefix(logURL, "/v1/internal/"): + return true + default: + return false + } +} + // marshalJSON marshals the object into JSON, respecting the user's pretty-ness // configuration. 
func (s *HTTPHandlers) marshalJSON(req *http.Request, obj interface{}) ([]byte, error) { @@ -1121,6 +1182,20 @@ func (s *HTTPHandlers) parseToken(req *http.Request, token *string) { s.parseTokenWithDefault(req, token) } +func (s *HTTPHandlers) rejectV1RequestWhenV2Enabled() error { + if s.agent.baseDeps.UseV2Resources() { + return newRejectV1RequestWhenV2EnabledError() + } + return nil +} + +func newRejectV1RequestWhenV2EnabledError() error { + return HTTPError{ + StatusCode: http.StatusBadRequest, + Reason: structs.ErrUsingV2CatalogExperiment.Error(), + } +} + func sourceAddrFromRequest(req *http.Request) string { xff := req.Header.Get("X-Forwarded-For") forwardHosts := strings.Split(xff, ",") diff --git a/agent/leafcert/generate.go b/agent/leafcert/generate.go index dc9c3b2871013..19dbdbbaf4bbb 100644 --- a/agent/leafcert/generate.go +++ b/agent/leafcert/generate.go @@ -230,6 +230,15 @@ func (m *Manager) generateNewLeaf( var ipAddresses []net.IP switch { + case req.WorkloadIdentity != "": + id = &connect.SpiffeIDWorkloadIdentity{ + TrustDomain: roots.TrustDomain, + Partition: req.TargetPartition(), + Namespace: req.TargetNamespace(), + WorkloadIdentity: req.WorkloadIdentity, + } + dnsNames = append(dnsNames, req.DNSSAN...) 
+ case req.Service != "": id = &connect.SpiffeIDService{ Host: roots.TrustDomain, @@ -272,7 +281,7 @@ func (m *Manager) generateNewLeaf( dnsNames = append(dnsNames, connect.PeeringServerSAN(req.Datacenter, roots.TrustDomain)) default: - return nil, newState, errors.New("URI must be either service, agent, server, or kind") + return nil, newState, errors.New("URI must be either workload identity, service, agent, server, or kind") } // Create a new private key diff --git a/agent/leafcert/leafcert_test_helpers.go b/agent/leafcert/leafcert_test_helpers.go index 5b0b3226cb383..0779033dccfd4 100644 --- a/agent/leafcert/leafcert_test_helpers.go +++ b/agent/leafcert/leafcert_test_helpers.go @@ -180,10 +180,16 @@ func (s *TestSigner) SignCert(ctx context.Context, req *structs.CASignRequest) ( return nil, fmt.Errorf("error parsing CSR URI: %w", err) } + var isService bool var serviceID *connect.SpiffeIDService + var workloadID *connect.SpiffeIDWorkloadIdentity + switch spiffeID.(type) { case *connect.SpiffeIDService: + isService = true serviceID = spiffeID.(*connect.SpiffeIDService) + case *connect.SpiffeIDWorkloadIdentity: + workloadID = spiffeID.(*connect.SpiffeIDWorkloadIdentity) default: return nil, fmt.Errorf("unexpected spiffeID type %T", spiffeID) } @@ -264,19 +270,35 @@ func (s *TestSigner) SignCert(ctx context.Context, req *structs.CASignRequest) ( } index := s.nextIndex() - // Service Spiffe ID case - return &structs.IssuedCert{ - SerialNumber: connect.EncodeSerialNumber(leafCert.SerialNumber), - CertPEM: leafPEM, - Service: serviceID.Service, - ServiceURI: leafCert.URIs[0].String(), - ValidAfter: leafCert.NotBefore, - ValidBefore: leafCert.NotAfter, - RaftIndex: structs.RaftIndex{ - CreateIndex: index, - ModifyIndex: index, - }, - }, nil + if isService { + // Service Spiffe ID case + return &structs.IssuedCert{ + SerialNumber: connect.EncodeSerialNumber(leafCert.SerialNumber), + CertPEM: leafPEM, + Service: serviceID.Service, + ServiceURI: 
leafCert.URIs[0].String(), + ValidAfter: leafCert.NotBefore, + ValidBefore: leafCert.NotAfter, + RaftIndex: structs.RaftIndex{ + CreateIndex: index, + ModifyIndex: index, + }, + }, nil + } else { + // Workload identity Spiffe ID case + return &structs.IssuedCert{ + SerialNumber: connect.EncodeSerialNumber(leafCert.SerialNumber), + CertPEM: leafPEM, + WorkloadIdentity: workloadID.WorkloadIdentity, + WorkloadIdentityURI: leafCert.URIs[0].String(), + ValidAfter: leafCert.NotBefore, + ValidBefore: leafCert.NotAfter, + RaftIndex: structs.RaftIndex{ + CreateIndex: index, + ModifyIndex: index, + }, + }, nil + } } type testRootsReader struct { diff --git a/agent/leafcert/structs.go b/agent/leafcert/structs.go index 8cd7375731fcf..685756c8dc8c5 100644 --- a/agent/leafcert/structs.go +++ b/agent/leafcert/structs.go @@ -31,16 +31,27 @@ type ConnectCALeafRequest struct { // The following flags indicate the entity we are requesting a cert for. // Only one of these must be specified. - Service string // Given a Service name, not ID, the request is for a SpiffeIDService. - Agent string // Given an Agent name, not ID, the request is for a SpiffeIDAgent. - Kind structs.ServiceKind // Given "mesh-gateway", the request is for a SpiffeIDMeshGateway. No other kinds supported. - Server bool // If true, the request is for a SpiffeIDServer. + WorkloadIdentity string // Given a WorkloadIdentity name, the request is for a SpiffeIDWorkload. + Service string // Given a Service name, not ID, the request is for a SpiffeIDService. + Agent string // Given an Agent name, not ID, the request is for a SpiffeIDAgent. + Kind structs.ServiceKind // Given "mesh-gateway", the request is for a SpiffeIDMeshGateway. No other kinds supported. + Server bool // If true, the request is for a SpiffeIDServer. 
} func (r *ConnectCALeafRequest) Key() string { r.EnterpriseMeta.Normalize() switch { + case r.WorkloadIdentity != "": + v, err := hashstructure.Hash([]any{ + r.WorkloadIdentity, + r.EnterpriseMeta, + r.DNSSAN, + r.IPSAN, + }, nil) + if err == nil { + return fmt.Sprintf("workloadidentity:%d", v) + } case r.Agent != "": v, err := hashstructure.Hash([]any{ r.Agent, diff --git a/agent/proxycfg-sources/catalog/config_source.go b/agent/proxycfg-sources/catalog/config_source.go index ec4aabeeb143a..deb1bbeac84cb 100644 --- a/agent/proxycfg-sources/catalog/config_source.go +++ b/agent/proxycfg-sources/catalog/config_source.go @@ -17,6 +17,8 @@ import ( "github.com/hashicorp/consul/agent/local" "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" + proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" + "github.com/hashicorp/consul/proto-public/pbresource" ) const source proxycfg.ProxySource = "catalog" @@ -51,11 +53,13 @@ func NewConfigSource(cfg Config) *ConfigSource { // Watch wraps the underlying proxycfg.Manager and dynamically registers // services from the catalog with it when requested by the xDS server. -func (m *ConfigSource) Watch(serviceID structs.ServiceID, nodeName string, token string) (<-chan *proxycfg.ConfigSnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, context.CancelFunc, error) { +func (m *ConfigSource) Watch(id *pbresource.ID, nodeName string, token string) (<-chan proxysnapshot.ProxySnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, proxysnapshot.CancelFunc, error) { + // Create service ID + serviceID := structs.NewServiceID(id.Name, GetEnterpriseMetaFromResourceID(id)) // If the service is registered to the local agent, use the LocalConfigSource // rather than trying to configure it from the catalog. 
if nodeName == m.NodeName && m.LocalState.ServiceExists(serviceID) { - return m.LocalConfigSource.Watch(serviceID, nodeName, token) + return m.LocalConfigSource.Watch(id, nodeName, token) } // Begin a session with the xDS session concurrency limiter. @@ -286,7 +290,7 @@ type Config struct { //go:generate mockery --name ConfigManager --inpackage type ConfigManager interface { - Watch(req proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, context.CancelFunc) + Watch(req proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) Register(proxyID proxycfg.ProxyID, service *structs.NodeService, source proxycfg.ProxySource, token string, overwrite bool) error Deregister(proxyID proxycfg.ProxyID, source proxycfg.ProxySource) } @@ -299,7 +303,7 @@ type Store interface { //go:generate mockery --name Watcher --inpackage type Watcher interface { - Watch(proxyID structs.ServiceID, nodeName string, token string) (<-chan *proxycfg.ConfigSnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, context.CancelFunc, error) + Watch(proxyID *pbresource.ID, nodeName string, token string) (<-chan proxysnapshot.ProxySnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, proxysnapshot.CancelFunc, error) } //go:generate mockery --name SessionLimiter --inpackage diff --git a/agent/proxycfg-sources/catalog/config_source_oss.go b/agent/proxycfg-sources/catalog/config_source_oss.go new file mode 100644 index 0000000000000..233ad64cee8fa --- /dev/null +++ b/agent/proxycfg-sources/catalog/config_source_oss.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !consulent + +package catalog + +import ( + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +func GetEnterpriseMetaFromResourceID(id *pbresource.ID) *acl.EnterpriseMeta { + return acl.DefaultEnterpriseMeta() +} diff --git a/agent/proxycfg-sources/catalog/config_source_test.go b/agent/proxycfg-sources/catalog/config_source_test.go index 79a7a85789021..7b267023a6144 100644 --- a/agent/proxycfg-sources/catalog/config_source_test.go +++ b/agent/proxycfg-sources/catalog/config_source_test.go @@ -10,11 +10,10 @@ import ( "testing" "time" + "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/grpc-external/limiter" @@ -22,6 +21,9 @@ import ( "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" + proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" + rtest "github.com/hashicorp/consul/internal/resource/resourcetest" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" ) func TestConfigSource_Success(t *testing.T) { @@ -78,15 +80,15 @@ func TestConfigSource_Success(t *testing.T) { }) t.Cleanup(mgr.Shutdown) - snapCh, termCh, _, cancelWatch1, err := mgr.Watch(serviceID, nodeName, token) + snapCh, termCh, _, cancelWatch1, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token) require.NoError(t, err) require.Equal(t, session1TermCh, termCh) // Expect Register to have been called with the proxy's inital port. 
select { case snap := <-snapCh: - require.Equal(t, 9999, snap.Port) - require.Equal(t, token, snap.ProxyID.Token) + require.Equal(t, 9999, snap.(*proxycfg.ConfigSnapshot).Port) + require.Equal(t, token, snap.(*proxycfg.ConfigSnapshot).ProxyID.Token) case <-time.After(100 * time.Millisecond): t.Fatal("timeout waiting for snapshot") } @@ -110,7 +112,7 @@ func TestConfigSource_Success(t *testing.T) { // Expect Register to have been called again with the proxy's new port. select { case snap := <-snapCh: - require.Equal(t, 8888, snap.Port) + require.Equal(t, 8888, snap.(*proxycfg.ConfigSnapshot).Port) case <-time.After(100 * time.Millisecond): t.Fatal("timeout waiting for snapshot") } @@ -129,13 +131,13 @@ func TestConfigSource_Success(t *testing.T) { require.Equal(t, map[string]any{ "local_connect_timeout_ms": 123, "max_inbound_connections": 321, - }, snap.Proxy.Config) + }, snap.(*proxycfg.ConfigSnapshot).Proxy.Config) case <-time.After(100 * time.Millisecond): t.Fatal("timeout waiting for snapshot") } // Start another watch. - _, termCh2, _, cancelWatch2, err := mgr.Watch(serviceID, nodeName, token) + _, termCh2, _, cancelWatch2, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token) require.NoError(t, err) require.Equal(t, session2TermCh, termCh2) @@ -169,7 +171,7 @@ func TestConfigSource_Success(t *testing.T) { func TestConfigSource_LocallyManagedService(t *testing.T) { serviceID := structs.NewServiceID("web-sidecar-proxy-1", nil) - proxyID := serviceID + proxyID := rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID() nodeName := "node-1" token := "token" @@ -178,7 +180,7 @@ func TestConfigSource_LocallyManagedService(t *testing.T) { localWatcher := NewMockWatcher(t) localWatcher.On("Watch", proxyID, nodeName, token). 
- Return(make(<-chan *proxycfg.ConfigSnapshot), nil, nil, context.CancelFunc(func() {}), nil) + Return(make(<-chan proxysnapshot.ProxySnapshot), nil, nil, proxysnapshot.CancelFunc(func() {}), nil) mgr := NewConfigSource(Config{ NodeName: nodeName, @@ -212,12 +214,12 @@ func TestConfigSource_ErrorRegisteringService(t *testing.T) { })) var canceledWatch bool - cancel := context.CancelFunc(func() { canceledWatch = true }) + cancel := proxysnapshot.CancelFunc(func() { canceledWatch = true }) cfgMgr := NewMockConfigManager(t) cfgMgr.On("Watch", mock.Anything). - Return(make(<-chan *proxycfg.ConfigSnapshot), cancel) + Return(make(<-chan proxysnapshot.ProxySnapshot), cancel) cfgMgr.On("Register", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return(errors.New("KABOOM")) @@ -237,7 +239,7 @@ func TestConfigSource_ErrorRegisteringService(t *testing.T) { }) t.Cleanup(mgr.Shutdown) - _, _, _, _, err := mgr.Watch(serviceID, nodeName, token) + _, _, _, _, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token) require.Error(t, err) require.True(t, canceledWatch, "watch should've been canceled") @@ -274,9 +276,9 @@ func TestConfigSource_ErrorInSyncLoop(t *testing.T) { NodeName: nodeName, Token: token, } - snapCh := make(chan *proxycfg.ConfigSnapshot, 1) + snapCh := make(chan proxysnapshot.ProxySnapshot, 1) cfgMgr.On("Watch", proxyID). - Return((<-chan *proxycfg.ConfigSnapshot)(snapCh), context.CancelFunc(func() {}), nil) + Return((<-chan proxysnapshot.ProxySnapshot)(snapCh), proxysnapshot.CancelFunc(func() {}), nil) // Answer the register call successfully for session 1 starting (Repeatability = 1). // Session 2 should not have caused a re-register to happen. 
@@ -328,21 +330,21 @@ func TestConfigSource_ErrorInSyncLoop(t *testing.T) { }) t.Cleanup(mgr.Shutdown) - snapCh, termCh, cfgSrcTerminated1, cancelWatch1, err := mgr.Watch(serviceID, nodeName, token) + snapCh, termCh, cfgSrcTerminated1, cancelWatch1, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token) require.NoError(t, err) require.Equal(t, session1TermCh, termCh) // Expect Register to have been called with the proxy's inital port. select { case snap := <-snapCh: - require.Equal(t, 9999, snap.Port) - require.Equal(t, token, snap.ProxyID.Token) + require.Equal(t, 9999, snap.(*proxycfg.ConfigSnapshot).Port) + require.Equal(t, token, snap.(*proxycfg.ConfigSnapshot).ProxyID.Token) case <-time.After(100 * time.Millisecond): t.Fatal("timeout waiting for snapshot") } // Start another watch. - _, termCh2, cfgSrcTerminated2, cancelWatch2, err := mgr.Watch(serviceID, nodeName, token) + _, termCh2, cfgSrcTerminated2, cancelWatch2, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token) require.NoError(t, err) require.Equal(t, session2TermCh, termCh2) @@ -422,12 +424,12 @@ func TestConfigSource_NotProxyService(t *testing.T) { })) var canceledWatch bool - cancel := context.CancelFunc(func() { canceledWatch = true }) + cancel := proxysnapshot.CancelFunc(func() { canceledWatch = true }) cfgMgr := NewMockConfigManager(t) cfgMgr.On("Watch", mock.Anything). 
- Return(make(<-chan *proxycfg.ConfigSnapshot), cancel) + Return(make(<-chan proxysnapshot.ProxySnapshot), cancel) mgr := NewConfigSource(Config{ Manager: cfgMgr, @@ -438,7 +440,7 @@ func TestConfigSource_NotProxyService(t *testing.T) { }) t.Cleanup(mgr.Shutdown) - _, _, _, _, err := mgr.Watch(serviceID, nodeName, token) + _, _, _, _, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token) require.Error(t, err) require.Contains(t, err.Error(), "must be a sidecar proxy or gateway") require.True(t, canceledWatch, "watch should've been canceled") @@ -455,7 +457,7 @@ func TestConfigSource_SessionLimiterError(t *testing.T) { t.Cleanup(src.Shutdown) _, _, _, _, err := src.Watch( - structs.NewServiceID("web-sidecar-proxy-1", nil), + rtest.Resource(pbmesh.ProxyConfigurationType, "web-sidecar-proxy-1").ID(), "node-name", "token", ) @@ -473,9 +475,9 @@ func testConfigManager(t *testing.T, serviceID structs.ServiceID, nodeName strin Token: token, } - snapCh := make(chan *proxycfg.ConfigSnapshot, 1) + snapCh := make(chan proxysnapshot.ProxySnapshot, 1) cfgMgr.On("Watch", proxyID). - Return((<-chan *proxycfg.ConfigSnapshot)(snapCh), context.CancelFunc(func() {}), nil) + Return((<-chan proxysnapshot.ProxySnapshot)(snapCh), proxysnapshot.CancelFunc(func() {}), nil) cfgMgr.On("Register", mock.Anything, mock.Anything, source, token, false). 
Run(func(args mock.Arguments) { diff --git a/agent/proxycfg-sources/catalog/mock_ConfigManager.go b/agent/proxycfg-sources/catalog/mock_ConfigManager.go index dbd82e702b7de..2c1608f241503 100644 --- a/agent/proxycfg-sources/catalog/mock_ConfigManager.go +++ b/agent/proxycfg-sources/catalog/mock_ConfigManager.go @@ -5,8 +5,8 @@ package catalog import ( proxycfg "github.com/hashicorp/consul/agent/proxycfg" mock "github.com/stretchr/testify/mock" - "context" + proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" structs "github.com/hashicorp/consul/agent/structs" ) @@ -36,27 +36,27 @@ func (_m *MockConfigManager) Register(proxyID proxycfg.ProxyID, service *structs } // Watch provides a mock function with given fields: req -func (_m *MockConfigManager) Watch(req proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, context.CancelFunc) { +func (_m *MockConfigManager) Watch(req proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) { ret := _m.Called(req) - var r0 <-chan *proxycfg.ConfigSnapshot - var r1 context.CancelFunc - if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, context.CancelFunc)); ok { + var r0 <-chan proxysnapshot.ProxySnapshot + var r1 proxysnapshot.CancelFunc + if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc)); ok { return rf(req) } - if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) <-chan *proxycfg.ConfigSnapshot); ok { + if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) <-chan proxysnapshot.ProxySnapshot); ok { r0 = rf(req) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan *proxycfg.ConfigSnapshot) + r0 = ret.Get(0).(<-chan proxysnapshot.ProxySnapshot) } } - if rf, ok := ret.Get(1).(func(proxycfg.ProxyID) context.CancelFunc); ok { + if rf, ok := ret.Get(1).(func(proxycfg.ProxyID) proxysnapshot.CancelFunc); ok { r1 = rf(req) } else { if ret.Get(1) != nil { - r1 = ret.Get(1).(context.CancelFunc) + r1 = 
ret.Get(1).(proxysnapshot.CancelFunc) } } diff --git a/agent/proxycfg-sources/catalog/mock_Watcher.go b/agent/proxycfg-sources/catalog/mock_Watcher.go index 1fc6ba7c6ea3b..f77ca132839ed 100644 --- a/agent/proxycfg-sources/catalog/mock_Watcher.go +++ b/agent/proxycfg-sources/catalog/mock_Watcher.go @@ -5,9 +5,12 @@ package catalog import ( limiter "github.com/hashicorp/consul/agent/grpc-external/limiter" mock "github.com/stretchr/testify/mock" - "github.com/hashicorp/consul/agent/structs" + + pbresource "github.com/hashicorp/consul/proto-public/pbresource" + proxycfg "github.com/hashicorp/consul/agent/proxycfg" - "context" + + proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" ) // MockWatcher is an autogenerated mock type for the Watcher type @@ -16,26 +19,26 @@ type MockWatcher struct { } // Watch provides a mock function with given fields: proxyID, nodeName, token -func (_m *MockWatcher) Watch(proxyID structs.ServiceID, nodeName string, token string) (<-chan *proxycfg.ConfigSnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, context.CancelFunc, error) { +func (_m *MockWatcher) Watch(proxyID *pbresource.ID, nodeName string, token string) (<-chan proxysnapshot.ProxySnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, proxysnapshot.CancelFunc, error) { ret := _m.Called(proxyID, nodeName, token) - var r0 <-chan *proxycfg.ConfigSnapshot + var r0 <-chan proxysnapshot.ProxySnapshot var r1 limiter.SessionTerminatedChan var r2 proxycfg.SrcTerminatedChan - var r3 context.CancelFunc + var r3 proxysnapshot.CancelFunc var r4 error - if rf, ok := ret.Get(0).(func(structs.ServiceID, string, string) (<-chan *proxycfg.ConfigSnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, context.CancelFunc, error)); ok { + if rf, ok := ret.Get(0).(func(*pbresource.ID, string, string) (<-chan proxysnapshot.ProxySnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, proxysnapshot.CancelFunc, error)); ok { 
return rf(proxyID, nodeName, token) } - if rf, ok := ret.Get(0).(func(structs.ServiceID, string, string) <-chan *proxycfg.ConfigSnapshot); ok { + if rf, ok := ret.Get(0).(func(*pbresource.ID, string, string) <-chan proxysnapshot.ProxySnapshot); ok { r0 = rf(proxyID, nodeName, token) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan *proxycfg.ConfigSnapshot) + r0 = ret.Get(0).(<-chan proxysnapshot.ProxySnapshot) } } - if rf, ok := ret.Get(1).(func(structs.ServiceID, string, string) limiter.SessionTerminatedChan); ok { + if rf, ok := ret.Get(1).(func(*pbresource.ID, string, string) limiter.SessionTerminatedChan); ok { r1 = rf(proxyID, nodeName, token) } else { if ret.Get(1) != nil { @@ -43,7 +46,7 @@ func (_m *MockWatcher) Watch(proxyID structs.ServiceID, nodeName string, token s } } - if rf, ok := ret.Get(2).(func(structs.ServiceID, string, string) proxycfg.SrcTerminatedChan); ok { + if rf, ok := ret.Get(2).(func(*pbresource.ID, string, string) proxycfg.SrcTerminatedChan); ok { r2 = rf(proxyID, nodeName, token) } else { if ret.Get(2) != nil { @@ -51,15 +54,15 @@ func (_m *MockWatcher) Watch(proxyID structs.ServiceID, nodeName string, token s } } - if rf, ok := ret.Get(3).(func(structs.ServiceID, string, string) context.CancelFunc); ok { + if rf, ok := ret.Get(3).(func(*pbresource.ID, string, string) proxysnapshot.CancelFunc); ok { r3 = rf(proxyID, nodeName, token) } else { if ret.Get(3) != nil { - r3 = ret.Get(3).(context.CancelFunc) + r3 = ret.Get(3).(proxysnapshot.CancelFunc) } } - if rf, ok := ret.Get(4).(func(structs.ServiceID, string, string) error); ok { + if rf, ok := ret.Get(4).(func(*pbresource.ID, string, string) error); ok { r4 = rf(proxyID, nodeName, token) } else { r4 = ret.Error(4) diff --git a/agent/proxycfg-sources/local/config_source.go b/agent/proxycfg-sources/local/config_source.go index e3176c597d99d..d30edc1b7bb21 100644 --- a/agent/proxycfg-sources/local/config_source.go +++ b/agent/proxycfg-sources/local/config_source.go @@ -4,11 +4,12 
@@ package local import ( - "context" - "github.com/hashicorp/consul/agent/grpc-external/limiter" "github.com/hashicorp/consul/agent/proxycfg" + "github.com/hashicorp/consul/agent/proxycfg-sources/catalog" structs "github.com/hashicorp/consul/agent/structs" + proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" + "github.com/hashicorp/consul/proto-public/pbresource" ) // ConfigSource wraps a proxycfg.Manager to create watches on services @@ -22,13 +23,14 @@ func NewConfigSource(cfgMgr ConfigManager) *ConfigSource { return &ConfigSource{cfgMgr} } -func (m *ConfigSource) Watch(serviceID structs.ServiceID, nodeName string, _ string) ( - <-chan *proxycfg.ConfigSnapshot, +func (m *ConfigSource) Watch(proxyID *pbresource.ID, nodeName string, _ string) ( + <-chan proxysnapshot.ProxySnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, - context.CancelFunc, + proxysnapshot.CancelFunc, error, ) { + serviceID := structs.NewServiceID(proxyID.Name, catalog.GetEnterpriseMetaFromResourceID(proxyID)) watchCh, cancelWatch := m.manager.Watch(proxycfg.ProxyID{ ServiceID: serviceID, NodeName: nodeName, diff --git a/agent/proxycfg-sources/local/mock_ConfigManager.go b/agent/proxycfg-sources/local/mock_ConfigManager.go index 66b204d1312fe..e3b2d3a445872 100644 --- a/agent/proxycfg-sources/local/mock_ConfigManager.go +++ b/agent/proxycfg-sources/local/mock_ConfigManager.go @@ -5,8 +5,8 @@ package local import ( proxycfg "github.com/hashicorp/consul/agent/proxycfg" mock "github.com/stretchr/testify/mock" - "context" + proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" structs "github.com/hashicorp/consul/agent/structs" ) @@ -52,27 +52,27 @@ func (_m *MockConfigManager) RegisteredProxies(source proxycfg.ProxySource) []pr } // Watch provides a mock function with given fields: id -func (_m *MockConfigManager) Watch(id proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, context.CancelFunc) { +func (_m *MockConfigManager) Watch(id 
proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) { ret := _m.Called(id) - var r0 <-chan *proxycfg.ConfigSnapshot - var r1 context.CancelFunc - if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, context.CancelFunc)); ok { + var r0 <-chan proxysnapshot.ProxySnapshot + var r1 proxysnapshot.CancelFunc + if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc)); ok { return rf(id) } - if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) <-chan *proxycfg.ConfigSnapshot); ok { + if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) <-chan proxysnapshot.ProxySnapshot); ok { r0 = rf(id) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan *proxycfg.ConfigSnapshot) + r0 = ret.Get(0).(<-chan proxysnapshot.ProxySnapshot) } } - if rf, ok := ret.Get(1).(func(proxycfg.ProxyID) context.CancelFunc); ok { + if rf, ok := ret.Get(1).(func(proxycfg.ProxyID) proxysnapshot.CancelFunc); ok { r1 = rf(id) } else { if ret.Get(1) != nil { - r1 = ret.Get(1).(context.CancelFunc) + r1 = ret.Get(1).(proxysnapshot.CancelFunc) } } diff --git a/agent/proxycfg-sources/local/sync.go b/agent/proxycfg-sources/local/sync.go index a8047c82f1151..54d95e6594f24 100644 --- a/agent/proxycfg-sources/local/sync.go +++ b/agent/proxycfg-sources/local/sync.go @@ -7,6 +7,8 @@ import ( "context" "time" + proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/consul/agent/local" @@ -146,7 +148,7 @@ func sync(cfg SyncConfig) { //go:generate mockery --name ConfigManager --inpackage type ConfigManager interface { - Watch(id proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, context.CancelFunc) + Watch(id proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) Register(proxyID proxycfg.ProxyID, service *structs.NodeService, source proxycfg.ProxySource, token string, overwrite bool) error Deregister(proxyID 
proxycfg.ProxyID, source proxycfg.ProxySource) RegisteredProxies(source proxycfg.ProxySource) []proxycfg.ProxyID diff --git a/agent/proxycfg/manager.go b/agent/proxycfg/manager.go index f2f7978a0a03e..4d3dd6cbc705d 100644 --- a/agent/proxycfg/manager.go +++ b/agent/proxycfg/manager.go @@ -4,17 +4,17 @@ package proxycfg import ( - "context" "errors" "runtime/debug" "sync" - "golang.org/x/time/rate" + "github.com/hashicorp/consul/lib/channels" "github.com/hashicorp/go-hclog" + "golang.org/x/time/rate" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/lib/channels" + proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" "github.com/hashicorp/consul/tlsutil" ) @@ -58,7 +58,7 @@ type Manager struct { mu sync.Mutex proxies map[ProxyID]*state - watchers map[ProxyID]map[uint64]chan *ConfigSnapshot + watchers map[ProxyID]map[uint64]chan proxysnapshot.ProxySnapshot maxWatchID uint64 } @@ -109,7 +109,7 @@ func NewManager(cfg ManagerConfig) (*Manager, error) { m := &Manager{ ManagerConfig: cfg, proxies: make(map[ProxyID]*state), - watchers: make(map[ProxyID]map[uint64]chan *ConfigSnapshot), + watchers: make(map[ProxyID]map[uint64]chan proxysnapshot.ProxySnapshot), rateLimiter: rate.NewLimiter(cfg.UpdateRateLimit, 1), } return m, nil @@ -265,12 +265,12 @@ func (m *Manager) notify(snap *ConfigSnapshot) { // it will drain the chan and then re-attempt delivery so that a slow consumer // gets the latest config earlier. This MUST be called from a method where m.mu // is held to be safe since it assumes we are the only goroutine sending on ch. 
-func (m *Manager) deliverLatest(snap *ConfigSnapshot, ch chan *ConfigSnapshot) { - m.Logger.Trace("delivering latest proxy snapshot to proxy", "proxyID", snap.ProxyID) +func (m *Manager) deliverLatest(snap proxysnapshot.ProxySnapshot, ch chan proxysnapshot.ProxySnapshot) { + m.Logger.Trace("delivering latest proxy snapshot to proxy", "proxyID", snap.(*ConfigSnapshot).ProxyID) err := channels.DeliverLatest(snap, ch) if err != nil { m.Logger.Error("failed to deliver proxyState to proxy", - "proxy", snap.ProxyID, + "proxy", snap.(*ConfigSnapshot).ProxyID, ) } @@ -280,16 +280,16 @@ func (m *Manager) deliverLatest(snap *ConfigSnapshot, ch chan *ConfigSnapshot) { // will not fail, but no updates will be delivered until the proxy is // registered. If there is already a valid snapshot in memory, it will be // delivered immediately. -func (m *Manager) Watch(id ProxyID) (<-chan *ConfigSnapshot, context.CancelFunc) { +func (m *Manager) Watch(id ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) { m.mu.Lock() defer m.mu.Unlock() // This buffering is crucial otherwise we'd block immediately trying to // deliver the current snapshot below if we already have one. 
- ch := make(chan *ConfigSnapshot, 1) + ch := make(chan proxysnapshot.ProxySnapshot, 1) watchers, ok := m.watchers[id] if !ok { - watchers = make(map[uint64]chan *ConfigSnapshot) + watchers = make(map[uint64]chan proxysnapshot.ProxySnapshot) } watchID := m.maxWatchID m.maxWatchID++ diff --git a/agent/proxycfg/manager_test.go b/agent/proxycfg/manager_test.go index e751364ddb0c1..7c83b5c770d26 100644 --- a/agent/proxycfg/manager_test.go +++ b/agent/proxycfg/manager_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/consul/agent/proxycfg/internal/watch" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" + proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" "github.com/hashicorp/consul/proto/private/pbpeering" "github.com/hashicorp/consul/sdk/testutil" ) @@ -470,7 +471,7 @@ func testManager_BasicLifecycle( require.Len(t, m.watchers, 0) } -func assertWatchChanBlocks(t *testing.T, ch <-chan *ConfigSnapshot) { +func assertWatchChanBlocks(t *testing.T, ch <-chan proxysnapshot.ProxySnapshot) { t.Helper() select { @@ -480,7 +481,7 @@ func assertWatchChanBlocks(t *testing.T, ch <-chan *ConfigSnapshot) { } } -func assertWatchChanRecvs(t *testing.T, ch <-chan *ConfigSnapshot, expect *ConfigSnapshot) { +func assertWatchChanRecvs(t *testing.T, ch <-chan proxysnapshot.ProxySnapshot, expect proxysnapshot.ProxySnapshot) { t.Helper() select { @@ -518,7 +519,7 @@ func TestManager_deliverLatest(t *testing.T) { } // test 1 buffered chan - ch1 := make(chan *ConfigSnapshot, 1) + ch1 := make(chan proxysnapshot.ProxySnapshot, 1) // Sending to an unblocked chan should work m.deliverLatest(snap1, ch1) @@ -534,7 +535,7 @@ func TestManager_deliverLatest(t *testing.T) { require.Equal(t, snap2, <-ch1) // Same again for 5-buffered chan - ch5 := make(chan *ConfigSnapshot, 5) + ch5 := make(chan proxysnapshot.ProxySnapshot, 5) // Sending to an unblocked chan should work m.deliverLatest(snap1, ch5) diff --git a/agent/proxycfg_test.go 
b/agent/proxycfg_test.go index d692c25789655..568d6164867ef 100644 --- a/agent/proxycfg_test.go +++ b/agent/proxycfg_test.go @@ -4,7 +4,6 @@ package agent import ( - "context" "encoding/json" "net/http" "net/http/httptest" @@ -14,9 +13,11 @@ import ( "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent/grpc-external/limiter" - "github.com/hashicorp/consul/agent/proxycfg" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" + proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot" + rtest "github.com/hashicorp/consul/internal/resource/resourcetest" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" "github.com/hashicorp/consul/testrpc" ) @@ -63,9 +64,9 @@ func TestAgent_local_proxycfg(t *testing.T) { var ( firstTime = true - ch <-chan *proxycfg.ConfigSnapshot + ch <-chan proxysnapshot.ProxySnapshot stc limiter.SessionTerminatedChan - cancel context.CancelFunc + cancel proxysnapshot.CancelFunc ) defer func() { if cancel != nil { @@ -86,7 +87,7 @@ func TestAgent_local_proxycfg(t *testing.T) { // Prior to fixes in https://github.com/hashicorp/consul/pull/16497 // this call to Watch() would deadlock. 
var err error - ch, stc, _, cancel, err = cfg.Watch(sid, a.config.NodeName, token) + ch, stc, _, cancel, err = cfg.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, sid.ID).ID(), a.config.NodeName, token) require.NoError(t, err) } select { diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index f385a11c28cf5..efc3bff697bb7 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -16,6 +16,8 @@ import ( "time" "github.com/google/tcpproxy" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" "github.com/stretchr/testify/require" gogrpc "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -24,9 +26,6 @@ import ( grpcstatus "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-uuid" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul" @@ -1836,7 +1835,7 @@ func newTestServer(t *testing.T, cb func(conf *consul.Config)) testingServer { deps := newDefaultDeps(t, conf) externalGRPCServer := external.NewServer(deps.Logger, nil, deps.TLSConfigurator, rate.NullRequestLimitsHandler(), keepalive.ServerParameters{}, nil) - server, err := consul.NewServer(conf, deps, externalGRPCServer, nil, deps.Logger) + server, err := consul.NewServer(conf, deps, externalGRPCServer, nil, deps.Logger, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, server.Shutdown()) diff --git a/agent/structs/acl.go b/agent/structs/acl.go index 579e8d231e22f..d856ce0af2eaf 100644 --- a/agent/structs/acl.go +++ b/agent/structs/acl.go @@ -13,12 +13,13 @@ import ( "strings" "time" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/lib/stringslice" + "golang.org/x/crypto/blake2b" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/lib/stringslice" ) type ACLMode string 
@@ -62,6 +63,10 @@ agent_prefix "" { event_prefix "" { policy = "%[1]s" } +identity_prefix "" { + policy = "%[1]s" + intentions = "%[1]s" +} key_prefix "" { policy = "%[1]s" } diff --git a/agent/structs/acl_templated_policy.go b/agent/structs/acl_templated_policy.go index 076d6ae256e87..52bdb0d66f618 100644 --- a/agent/structs/acl_templated_policy.go +++ b/agent/structs/acl_templated_policy.go @@ -11,11 +11,10 @@ import ( "hash/fnv" "text/template" + "github.com/hashicorp/go-multierror" "github.com/xeipuuv/gojsonschema" "golang.org/x/exp/slices" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib/stringslice" @@ -27,26 +26,30 @@ var ACLTemplatedPolicyNodeSchema string //go:embed acltemplatedpolicy/schemas/service.json var ACLTemplatedPolicyServiceSchema string +//go:embed acltemplatedpolicy/schemas/workload-identity.json +var ACLTemplatedPolicyWorkloadIdentitySchema string + //go:embed acltemplatedpolicy/schemas/api-gateway.json var ACLTemplatedPolicyAPIGatewaySchema string type ACLTemplatedPolicies []*ACLTemplatedPolicy const ( - ACLTemplatedPolicyServiceID = "00000000-0000-0000-0000-000000000003" - ACLTemplatedPolicyNodeID = "00000000-0000-0000-0000-000000000004" - ACLTemplatedPolicyDNSID = "00000000-0000-0000-0000-000000000005" - ACLTemplatedPolicyNomadServerID = "00000000-0000-0000-0000-000000000006" - _ = "00000000-0000-0000-0000-000000000007" // formerly workload identity - ACLTemplatedPolicyAPIGatewayID = "00000000-0000-0000-0000-000000000008" - ACLTemplatedPolicyNomadClientID = "00000000-0000-0000-0000-000000000009" - - ACLTemplatedPolicyServiceDescription = "Gives the token or role permissions to register a service and discover services in the Consul catalog. It also gives the specified service's sidecar proxy the permission to discover and route traffic to other services." 
- ACLTemplatedPolicyNodeDescription = "Gives the token or role permissions for a register an agent/node into the catalog. A node is typically a consul agent but can also be a physical server, cloud instance or a container." - ACLTemplatedPolicyDNSDescription = "Gives the token or role permissions for the Consul DNS to query services in the network." - ACLTemplatedPolicyNomadServerDescription = "Gives the token or role permissions required for integration with a nomad server." - ACLTemplatedPolicyAPIGatewayDescription = "Gives the token or role permissions for a Consul api gateway" - ACLTemplatedPolicyNomadClientDescription = "Gives the token or role permissions required for integration with a nomad client." + ACLTemplatedPolicyServiceID = "00000000-0000-0000-0000-000000000003" + ACLTemplatedPolicyNodeID = "00000000-0000-0000-0000-000000000004" + ACLTemplatedPolicyDNSID = "00000000-0000-0000-0000-000000000005" + ACLTemplatedPolicyNomadServerID = "00000000-0000-0000-0000-000000000006" + ACLTemplatedPolicyWorkloadIdentityID = "00000000-0000-0000-0000-000000000007" + ACLTemplatedPolicyAPIGatewayID = "00000000-0000-0000-0000-000000000008" + ACLTemplatedPolicyNomadClientID = "00000000-0000-0000-0000-000000000009" + + ACLTemplatedPolicyServiceDescription = "Gives the token or role permissions to register a service and discover services in the Consul catalog. It also gives the specified service's sidecar proxy the permission to discover and route traffic to other services." + ACLTemplatedPolicyNodeDescription = "Gives the token or role permissions for a register an agent/node into the catalog. A node is typically a consul agent but can also be a physical server, cloud instance or a container." + ACLTemplatedPolicyDNSDescription = "Gives the token or role permissions for the Consul DNS to query services in the network." + ACLTemplatedPolicyNomadServerDescription = "Gives the token or role permissions required for integration with a nomad server." 
+ ACLTemplatedPolicyWorkloadIdentityDescription = "Gives the token or role permissions for a specific workload identity." + ACLTemplatedPolicyAPIGatewayDescription = "Gives the token or role permissions for a Consul api gateway" + ACLTemplatedPolicyNomadClientDescription = "Gives the token or role permissions required for integration with a nomad client." ACLTemplatedPolicyNoRequiredVariablesSchema = "" // catch-all schema for all templated policy that don't require a schema ) @@ -93,6 +96,13 @@ var ( Template: ACLTemplatedPolicyNomadServer, Description: ACLTemplatedPolicyNomadServerDescription, }, + api.ACLTemplatedPolicyWorkloadIdentityName: { + TemplateID: ACLTemplatedPolicyWorkloadIdentityID, + TemplateName: api.ACLTemplatedPolicyWorkloadIdentityName, + Schema: ACLTemplatedPolicyWorkloadIdentitySchema, + Template: ACLTemplatedPolicyWorkloadIdentity, + Description: ACLTemplatedPolicyWorkloadIdentityDescription, + }, api.ACLTemplatedPolicyAPIGatewayName: { TemplateID: ACLTemplatedPolicyAPIGatewayID, TemplateName: api.ACLTemplatedPolicyAPIGatewayName, diff --git a/agent/structs/acl_templated_policy_ce.go b/agent/structs/acl_templated_policy_ce.go index 3cbaa22217cc0..23e656f0fb17e 100644 --- a/agent/structs/acl_templated_policy_ce.go +++ b/agent/structs/acl_templated_policy_ce.go @@ -19,6 +19,9 @@ var ACLTemplatedPolicyDNS string //go:embed acltemplatedpolicy/policies/ce/nomad-server.hcl var ACLTemplatedPolicyNomadServer string +//go:embed acltemplatedpolicy/policies/ce/workload-identity.hcl +var ACLTemplatedPolicyWorkloadIdentity string + //go:embed acltemplatedpolicy/policies/ce/api-gateway.hcl var ACLTemplatedPolicyAPIGateway string diff --git a/agent/structs/acl_templated_policy_ce_test.go b/agent/structs/acl_templated_policy_ce_test.go index 63f42ca83e6f0..f21292806283a 100644 --- a/agent/structs/acl_templated_policy_ce_test.go +++ b/agent/structs/acl_templated_policy_ce_test.go @@ -80,6 +80,21 @@ service_prefix "" { } query_prefix "" { policy = "read" +}`, + 
}, + }, + "workload-identity-template": { + templatedPolicy: &ACLTemplatedPolicy{ + TemplateID: ACLTemplatedPolicyWorkloadIdentityID, + TemplateName: api.ACLTemplatedPolicyWorkloadIdentityName, + TemplateVariables: &ACLTemplatedPolicyVariables{ + Name: "api", + }, + }, + expectedPolicy: &ACLPolicy{ + Description: "synthetic policy generated from templated policy: builtin/workload-identity", + Rules: `identity "api" { + policy = "write" }`, }, }, diff --git a/agent/structs/acltemplatedpolicy/policies/ce/workload-identity.hcl b/agent/structs/acltemplatedpolicy/policies/ce/workload-identity.hcl new file mode 100644 index 0000000000000..ccd1e0564633d --- /dev/null +++ b/agent/structs/acltemplatedpolicy/policies/ce/workload-identity.hcl @@ -0,0 +1,3 @@ +identity "{{.Name}}" { + policy = "write" +} \ No newline at end of file diff --git a/agent/structs/acltemplatedpolicy/schemas/workload-identity.json b/agent/structs/acltemplatedpolicy/schemas/workload-identity.json new file mode 100644 index 0000000000000..31064f36af7f0 --- /dev/null +++ b/agent/structs/acltemplatedpolicy/schemas/workload-identity.json @@ -0,0 +1,13 @@ +{ + "type": "object", + "properties": { + "name": { "type": "string", "$ref": "#/definitions/min-length-one" } + }, + "required": ["name"], + "definitions": { + "min-length-one": { + "type": "string", + "minLength": 1 + } + } +} \ No newline at end of file diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index 32b4e0c89de09..5d419e083295c 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -12,11 +12,12 @@ import ( "time" "github.com/miekg/dns" + + "github.com/hashicorp/go-multierror" "github.com/mitchellh/hashstructure" "github.com/mitchellh/mapstructure" "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" @@ -268,12 +269,6 @@ func (e *ServiceConfigEntry) Validate() error { 
validationErr = multierror.Append(validationErr, fmt.Errorf("invalid value for balance_inbound_connections: %v", e.BalanceInboundConnections)) } - switch e.Protocol { - case "", "http", "http2", "grpc", "tcp": - default: - validationErr = multierror.Append(validationErr, fmt.Errorf("invalid value for protocol: %v", e.Protocol)) - } - // External endpoints are invalid with an existing service's upstream configuration if e.UpstreamConfig != nil && e.Destination != nil { validationErr = multierror.Append(validationErr, errors.New("UpstreamConfig and Destination are mutually exclusive for service defaults")) diff --git a/agent/structs/config_entry_test.go b/agent/structs/config_entry_test.go index 4c092acc46e25..e57e2c4041348 100644 --- a/agent/structs/config_entry_test.go +++ b/agent/structs/config_entry_test.go @@ -10,12 +10,12 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/hashicorp/hcl" "github.com/mitchellh/copystructure" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" - "github.com/hashicorp/hcl" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" @@ -3225,14 +3225,6 @@ func TestServiceConfigEntry(t *testing.T) { }, validateErr: `Invalid MutualTLSMode "invalid-mtls-mode". 
Must be one of "", "strict", or "permissive".`, }, - "validate: invalid Protocol in service-defaults": { - entry: &ServiceConfigEntry{ - Kind: ServiceDefaults, - Name: "web", - Protocol: "blah", - }, - validateErr: `invalid value for protocol: blah`, - }, } testConfigEntryNormalizeAndValidate(t, cases) } diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 5fa7b0771549f..267aeba5e63d9 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -8,11 +8,12 @@ import ( "reflect" "time" + "github.com/hashicorp/consul/lib/stringslice" + "github.com/mitchellh/mapstructure" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/lib/stringslice" ) const ( @@ -216,6 +217,11 @@ type IssuedCert struct { // PrivateKeyPEM is the PEM encoded private key associated with CertPEM. PrivateKeyPEM string `json:",omitempty"` + // WorkloadIdentity is the name of the workload identity for which the cert was issued. + WorkloadIdentity string `json:",omitempty"` + // WorkloadIdentityURI is the cert URI value. + WorkloadIdentityURI string `json:",omitempty"` + // Service is the name of the service for which the cert was issued. Service string `json:",omitempty"` // ServiceURI is the cert URI value. 
diff --git a/agent/structs/connect_proxy_config.go b/agent/structs/connect_proxy_config.go index 3bd5276f8279a..d84953e1b0e7e 100644 --- a/agent/structs/connect_proxy_config.go +++ b/agent/structs/connect_proxy_config.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" ) const ( @@ -180,6 +181,39 @@ type AccessLogsConfig struct { TextFormat string `json:",omitempty" alias:"text_format"` } +func (c *AccessLogsConfig) GetEnabled() bool { + return c.Enabled +} + +func (c *AccessLogsConfig) GetDisableListenerLogs() bool { + return c.DisableListenerLogs +} + +func (c *AccessLogsConfig) GetType() pbmesh.LogSinkType { + switch c.Type { + case FileLogSinkType: + return pbmesh.LogSinkType_LOG_SINK_TYPE_FILE + case StdErrLogSinkType: + return pbmesh.LogSinkType_LOG_SINK_TYPE_STDERR + case StdOutLogSinkType: + return pbmesh.LogSinkType_LOG_SINK_TYPE_STDOUT + } + + return pbmesh.LogSinkType_LOG_SINK_TYPE_DEFAULT +} + +func (c *AccessLogsConfig) GetPath() string { + return c.Path +} + +func (c *AccessLogsConfig) GetJsonFormat() string { + return c.JSONFormat +} + +func (c *AccessLogsConfig) GetTextFormat() string { + return c.TextFormat +} + func (c *AccessLogsConfig) IsZero() bool { if c == nil { return true @@ -805,3 +839,12 @@ func (e *ExposeConfig) Finalize() { } } } + +type AccessLogs interface { + GetEnabled() bool + GetDisableListenerLogs() bool + GetType() pbmesh.LogSinkType + GetPath() string + GetJsonFormat() string + GetTextFormat() string +} diff --git a/agent/structs/errors.go b/agent/structs/errors.go index 029f958ec4ab9..9b62de648d8ef 100644 --- a/agent/structs/errors.go +++ b/agent/structs/errors.go @@ -23,6 +23,7 @@ const ( errRateLimited = "Rate limit reached, try again later" // Note: we depend on this error message in the gRPC ConnectCA.Sign endpoint (see: isRateLimitError). 
errNotPrimaryDatacenter = "not the primary datacenter" errStateReadOnly = "CA Provider State is read-only" + errUsingV2CatalogExperiment = "V1 catalog is disabled when V2 is enabled" errSamenessGroupNotFound = "Sameness Group not found" errSamenessGroupMustBeDefaultForFailover = "Sameness Group must have DefaultForFailover set to true in order to use this endpoint" ) @@ -41,6 +42,7 @@ var ( ErrRateLimited = errors.New(errRateLimited) // Note: we depend on this error message in the gRPC ConnectCA.Sign endpoint (see: isRateLimitError). ErrNotPrimaryDatacenter = errors.New(errNotPrimaryDatacenter) ErrStateReadOnly = errors.New(errStateReadOnly) + ErrUsingV2CatalogExperiment = errors.New(errUsingV2CatalogExperiment) ErrSamenessGroupNotFound = errors.New(errSamenessGroupNotFound) ErrSamenessGroupMustBeDefaultForFailover = errors.New(errSamenessGroupMustBeDefaultForFailover) ) @@ -61,6 +63,10 @@ func IsErrRPCRateExceeded(err error) bool { return err != nil && strings.Contains(err.Error(), errRPCRateExceeded) } +func IsErrUsingV2CatalogExperiment(err error) bool { + return err != nil && strings.Contains(err.Error(), errUsingV2CatalogExperiment) +} + func IsErrSamenessGroupNotFound(err error) bool { return err != nil && strings.Contains(err.Error(), errSamenessGroupNotFound) } diff --git a/agent/testagent.go b/agent/testagent.go index 5f0225c42579a..a18dee1eada53 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -19,10 +19,9 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/stretchr/testify/require" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" + "github.com/stretchr/testify/require" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/config" @@ -106,7 +105,7 @@ type TestAgentOpts struct { // NewTestAgent returns a started agent with the given configuration. It fails // the test if the Agent could not be started. 
-func NewTestAgent(t testing.TB, hcl string, opts ...TestAgentOpts) *TestAgent { +func NewTestAgent(t *testing.T, hcl string, opts ...TestAgentOpts) *TestAgent { // This varargs approach is used so that we don't have to modify all of the `NewTestAgent()` calls // in order to introduce more optional arguments. require.LessOrEqual(t, len(opts), 1, "NewTestAgent cannot accept more than one opts argument") @@ -134,7 +133,7 @@ func NewTestAgentWithConfigFile(t *testing.T, hcl string, configFiles []string) // // The caller is responsible for calling Shutdown() to stop the agent and remove // temporary directories. -func StartTestAgent(t testing.TB, a TestAgent) *TestAgent { +func StartTestAgent(t *testing.T, a TestAgent) *TestAgent { t.Helper() retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) { r.Helper() @@ -316,6 +315,22 @@ func (a *TestAgent) waitForUp() error { } } + if a.baseDeps.UseV2Resources() { + args := structs.DCSpecificRequest{ + Datacenter: "dc1", + } + var leader string + if err := a.RPC(context.Background(), "Status.Leader", args, &leader); err != nil { + retErr = fmt.Errorf("Status.Leader failed: %v", err) + continue // fail, try again + } + if leader == "" { + retErr = fmt.Errorf("No leader") + continue // fail, try again + } + return nil // success + } + // Ensure we have a leader and a node registration. 
args := &structs.DCSpecificRequest{ Datacenter: a.Config.Datacenter, diff --git a/agent/ui_endpoint_test.go b/agent/ui_endpoint_test.go index 2df3cb7849890..dd1c6d8134b2f 100644 --- a/agent/ui_endpoint_test.go +++ b/agent/ui_endpoint_test.go @@ -18,11 +18,10 @@ import ( "testing" "time" + cleanhttp "github.com/hashicorp/go-cleanhttp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - cleanhttp "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" @@ -33,6 +32,28 @@ import ( "github.com/hashicorp/consul/types" ) +func TestUIEndpointsFailInV2(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t, `experiments = ["resource-apis"]`) + + checkRequest := func(method, url string) { + t.Run(method+" "+url, func(t *testing.T) { + assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, "{}") + }) + } + + checkRequest("GET", "/v1/internal/ui/nodes") + checkRequest("GET", "/v1/internal/ui/node/web") + checkRequest("GET", "/v1/internal/ui/services") + checkRequest("GET", "/v1/internal/ui/exported-services") + checkRequest("GET", "/v1/internal/ui/catalog-overview") + checkRequest("GET", "/v1/internal/ui/gateway-services-nodes/web") + checkRequest("GET", "/v1/internal/ui/gateway-intentions/web") + checkRequest("GET", "/v1/internal/ui/service-topology/web") + checkRequest("PUT", "/v1/internal/service-virtual-ip") +} + func TestUIIndex(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") diff --git a/agent/uiserver/ui_template_data.go b/agent/uiserver/ui_template_data.go index 726207b148f0c..34d3a453b0fd6 100644 --- a/agent/uiserver/ui_template_data.go +++ b/agent/uiserver/ui_template_data.go @@ -31,6 +31,14 @@ func uiTemplateDataFromConfig(cfg *config.RuntimeConfig) (map[string]interface{} uiCfg["metrics_provider_options"] = json.RawMessage(cfg.UIConfig.MetricsProviderOptionsJSON) } + v2CatalogEnabled := false + for _, 
experiment := range cfg.Experiments { + if experiment == "resource-apis" { + v2CatalogEnabled = true + break + } + } + d := map[string]interface{}{ "ContentPath": cfg.UIConfig.ContentPath, "ACLsEnabled": cfg.ACLsEnabled, @@ -39,6 +47,7 @@ func uiTemplateDataFromConfig(cfg *config.RuntimeConfig) (map[string]interface{} "LocalDatacenter": cfg.Datacenter, "PrimaryDatacenter": cfg.PrimaryDatacenter, "PeeringEnabled": cfg.PeeringEnabled, + "V2CatalogEnabled": v2CatalogEnabled, } // Also inject additional provider scripts if needed, otherwise strip the diff --git a/agent/uiserver/uiserver_test.go b/agent/uiserver/uiserver_test.go index c1e21ce745be3..d86baf1f48f56 100644 --- a/agent/uiserver/uiserver_test.go +++ b/agent/uiserver/uiserver_test.go @@ -13,11 +13,10 @@ import ( "strings" "testing" + "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" "golang.org/x/net/html" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/sdk/testutil" ) @@ -52,7 +51,8 @@ func TestUIServerIndex(t *testing.T) { "metrics_provider": "", "metrics_proxy_enabled": false, "dashboard_url_templates": null - } + }, + "V2CatalogEnabled": false }`, }, { @@ -91,7 +91,8 @@ func TestUIServerIndex(t *testing.T) { }, "metrics_proxy_enabled": false, "dashboard_url_templates": null - } + }, + "V2CatalogEnabled": false }`, }, { @@ -112,7 +113,8 @@ func TestUIServerIndex(t *testing.T) { "metrics_provider": "", "metrics_proxy_enabled": false, "dashboard_url_templates": null - } + }, + "V2CatalogEnabled": false }`, }, { @@ -133,7 +135,30 @@ func TestUIServerIndex(t *testing.T) { "metrics_provider": "", "metrics_proxy_enabled": false, "dashboard_url_templates": null - } + }, + "V2CatalogEnabled": false + }`, + }, + { + name: "v2 catalog enabled", + cfg: basicUIEnabledConfig(withV2CatalogEnabled()), + path: "/", + wantStatus: http.StatusOK, + wantContains: []string{"